From 9c044a920c31054fa106fb028e9115a3bd852cf8 Mon Sep 17 00:00:00 2001
From: Sebastian Raschka
Date: Sat, 19 Jan 2019 17:48:34 -0600
Subject: [PATCH] v 0.15.0 (#493)

---
 README.md | 2 +-
 docs/_site/{cinder => custom_theme}/404.html | 0
 docs/_site/{cinder => custom_theme}/base.html | 0
 .../{cinder => custom_theme}/content.html | 0
 .../{cinder => custom_theme}/css/base.css | 2 +-
 .../css/bootstrap-custom.css | 0
 .../css/bootstrap-custom.min.css | 0
 .../{cinder => custom_theme}/css/cinder.css | 15 +
 .../css/font-awesome-4.0.3.css | 0
 .../css/highlight.css | 0
 .../fonts/fontawesome-webfont.eot | Bin
 .../fonts/fontawesome-webfont.svg | 0
 .../fonts/fontawesome-webfont.ttf | Bin
 .../fonts/fontawesome-webfont.woff | Bin
 .../{cinder => custom_theme}/img/favicon.ico | Bin
 .../{cinder => custom_theme}/img/grid1.png | Bin
 .../{cinder => custom_theme}/img/grid10.png | Bin
 .../{cinder => custom_theme}/img/grid11.png | Bin
 .../{cinder => custom_theme}/img/grid12.png | Bin
 .../{cinder => custom_theme}/img/grid13.png | Bin
 .../{cinder => custom_theme}/img/grid14.png | Bin
 .../{cinder => custom_theme}/img/grid15.png | Bin
 .../{cinder => custom_theme}/img/grid16.png | Bin
 .../{cinder => custom_theme}/img/grid17.png | Bin
 .../{cinder => custom_theme}/img/grid18.png | Bin
 .../{cinder => custom_theme}/img/grid19.png | Bin
 .../{cinder => custom_theme}/img/grid2.png | Bin
 .../{cinder => custom_theme}/img/grid20.png | Bin
 .../{cinder => custom_theme}/img/grid3.png | Bin
 .../{cinder => custom_theme}/img/grid4.png | Bin
 .../{cinder => custom_theme}/img/grid5.png | Bin
 .../{cinder => custom_theme}/img/grid6.png | Bin
 .../{cinder => custom_theme}/img/grid7.png | Bin
 .../{cinder => custom_theme}/img/grid8.png | Bin
 .../{cinder => custom_theme}/img/grid9.png | Bin
 .../_site/{cinder => custom_theme}/js/base.js | 0
 .../js/bootstrap-3.0.3.min.js | 0
 .../js/highlight.pack.js | 0
 .../js/jquery-1.10.2.min.js | 0
 docs/_site/{cinder => custom_theme}/main.html | 0
 .../{cinder => custom_theme}/mkdocs_theme.yml | 0
 .../{cinder => custom_theme}/nav-sub.html | 0
 docs/_site/{cinder => custom_theme}/nav.html | 8 +-
 docs/_site/{cinder => custom_theme}/toc.html | 4 +-
 docs/_site/ipynb2markdown.py | 2 +-
 docs/_site/make_api.py | 3 +-
 docs/_site/make_userguide.py | 2 +-
 docs/_site/md2pdf.py | 2 +-
 docs/_site/mkdocs.yml | 8 +-
 docs/_site/site/CHANGELOG/index.html | 118 +--
 docs/_site/site/CONTRIBUTING/index.html | 34 +-
 docs/_site/site/USER_GUIDE_INDEX/index.html | 46 +-
 .../mlxtend.classifier/Adaline/index.html | 14 +-
 .../EnsembleVoteClassifier/index.html | 14 +-
 .../LogisticRegression/index.html | 14 +-
 .../MultiLayerPerceptron/index.html | 14 +-
 .../mlxtend.classifier/Perceptron/index.html | 14 +-
 .../SoftmaxRegression/index.html | 14 +-
 .../StackingCVClassifier/index.html | 14 +-
 .../StackingClassifier/index.html | 14 +-
 .../mlxtend.cluster/Kmeans/index.html | 14 +-
 .../mlxtend.data/autompg_data/index.html | 12 +-
 .../boston_housing_data/index.html | 12 +-
 .../mlxtend.data/iris_data/index.html | 12 +-
 .../mlxtend.data/loadlocal_mnist/index.html | 12 +-
 .../make_multiplexer_dataset/index.html | 12 +-
 .../mlxtend.data/mnist_data/index.html | 12 +-
 .../mlxtend.data/three_blobs_data/index.html | 12 +-
 .../mlxtend.data/wine_data/index.html | 12 +-
 .../BootstrapOutOfBag/index.html | 14 +-
 .../PredefinedHoldoutSplit/index.html | 14 +-
 .../RandomHoldoutSplit/index.html | 14 +-
 .../mlxtend.evaluate/bootstrap/index.html | 12 +-
 .../bootstrap_point632_score/index.html | 12 +-
 .../mlxtend.evaluate/cochrans_q/index.html | 12 +-
.../combined_ftest_5x2cv/index.html | 12 +- .../confusion_matrix/index.html | 12 +- .../feature_importance_permutation/index.html | 12 +- .../mlxtend.evaluate/ftest/index.html | 12 +- .../mlxtend.evaluate/lift_score/index.html | 12 +- .../mlxtend.evaluate/mcnemar/index.html | 12 +- .../mlxtend.evaluate/mcnemar_table/index.html | 12 +- .../mcnemar_tables/index.html | 12 +- .../paired_ttest_5x2cv/index.html | 12 +- .../paired_ttest_kfold_cv/index.html | 12 +- .../paired_ttest_resampled/index.html | 12 +- .../permutation_test/index.html | 12 +- .../proportion_difference/index.html | 12 +- .../mlxtend.evaluate/scoring/index.html | 12 +- .../LinearDiscriminantAnalysis/index.html | 14 +- .../PrincipalComponentAnalysis/index.html | 14 +- .../RBFKernelPCA/index.html | 14 +- .../ColumnSelector/index.html | 14 +- .../ExhaustiveFeatureSelector/index.html | 14 +- .../SequentialFeatureSelector/index.html | 14 +- .../find_filegroups/index.html | 12 +- .../mlxtend.file_io/find_files/index.html | 12 +- .../apriori/index.html | 12 +- .../association_rules/index.html | 12 +- .../extract_face_landmarks/index.html | 12 +- .../mlxtend.math/factorial/index.html | 12 +- .../mlxtend.math/num_combinations/index.html | 12 +- .../mlxtend.math/num_permutations/index.html | 12 +- .../vectorspace_dimensionality/index.html | 12 +- .../vectorspace_orthonormalization/index.html | 12 +- .../category_scatter/index.html | 12 +- .../checkerboard_plot/index.html | 12 +- .../mlxtend.plotting/ecdf/index.html | 12 +- .../enrichment_plot/index.html | 12 +- .../plot_confusion_matrix/index.html | 12 +- .../plot_decision_regions/index.html | 12 +- .../plot_learning_curves/index.html | 12 +- .../plot_linear_regression/index.html | 12 +- .../index.html | 12 +- .../remove_borders/index.html | 12 +- .../scatterplotmatrix/index.html | 12 +- .../stacked_barplot/index.html | 12 +- .../CopyTransformer/index.html | 14 +- .../DenseTransformer/index.html | 14 +- .../MeanCenterer/index.html | 14 +- .../OnehotTransactions/index.html | 14 +- .../TransactionEncoder/index.html | 14 +- .../minmax_scaling/index.html | 12 +- .../mlxtend.preprocessing/one_hot/index.html | 12 +- .../shuffle_arrays_unison/index.html | 12 +- .../standardize/index.html | 12 +- .../LinearRegression/index.html | 14 +- .../StackingCVRegressor/index.html | 14 +- .../StackingRegressor/index.html | 16 +- .../mlxtend.text/generalize_names/index.html | 12 +- .../generalize_names_duplcheck/index.html | 12 +- .../tokenizer_emoticons/index.html | 12 +- .../tokenizer_words_and_emoticons/index.html | 12 +- .../mlxtend.utils/Counter/index.html | 14 +- .../mlxtend.utils/assert_raises/index.html | 12 +- .../mlxtend.utils/check_Xy/index.html | 12 +- .../format_kwarg_dictionaries/index.html | 12 +- .../api_subpackages/mlxtend._base/index.html | 12 +- .../mlxtend.classifier/index.html | 44 +- .../mlxtend.cluster/index.html | 16 +- .../api_subpackages/mlxtend.data/index.html | 14 +- .../mlxtend.evaluate/index.html | 20 +- .../mlxtend.externals/index.html | 12 +- .../mlxtend.feature_extraction/index.html | 24 +- .../mlxtend.feature_selection/index.html | 20 +- .../mlxtend.file_io/index.html | 14 +- .../mlxtend.frequent_patterns/index.html | 16 +- .../api_subpackages/mlxtend.image/index.html | 24 +- .../api_subpackages/mlxtend.math/index.html | 12 +- .../mlxtend.plotting/index.html | 16 +- .../mlxtend.preprocessing/index.html | 24 +- .../mlxtend.regressor/index.html | 24 +- .../api_subpackages/mlxtend.text/index.html | 14 +- .../api_subpackages/mlxtend.utils/index.html | 16 +- 
docs/_site/site/cite/index.html | 14 +- docs/_site/site/contributors/index.html | 14 +- docs/_site/site/css/base.css | 2 +- docs/_site/site/css/cinder.css | 15 + docs/_site/site/discuss/index.html | 14 +- docs/_site/site/index.html | 16 +- docs/_site/site/installation/index.html | 22 +- docs/_site/site/license/index.html | 20 +- docs/_site/site/search/search_index.json | 2 +- docs/_site/site/sitemap.xml | 211 ++--- docs/_site/site/sitemap.xml.gz | Bin 1301 -> 1306 bytes .../user_guide/classifier/Adaline/index.html | 28 +- .../EnsembleVoteClassifier/index.html | 40 +- .../classifier/LogisticRegression/index.html | 28 +- .../MultiLayerPerceptron/index.html | 26 +- .../classifier/Perceptron/index.html | 24 +- .../classifier/SoftmaxRegression/index.html | 24 +- .../StackingCVClassifier/index.html | 26 +- .../classifier/StackingClassifier.ipynb | 9 +- .../classifier/StackingClassifier/index.html | 29 +- .../StackingClassifier_14_0.png | Bin 48974 -> 32307 bytes .../site/user_guide/cluster/Kmeans/index.html | 26 +- .../user_guide/data/autompg_data/index.html | 22 +- .../data/boston_housing_data/index.html | 22 +- .../site/user_guide/data/iris_data/index.html | 22 +- .../data/loadlocal_mnist/index.html | 26 +- .../data/make_multiplexer_dataset/index.html | 22 +- .../user_guide/data/mnist_data/index.html | 24 +- .../data/three_blobs_data/index.html | 22 +- .../site/user_guide/data/wine_data/index.html | 22 +- .../evaluate/BootstrapOutOfBag.ipynb | 2 +- .../evaluate/BootstrapOutOfBag/index.html | 24 +- .../PredefinedHoldoutSplit/index.html | 24 +- .../evaluate/RandomHoldoutSplit/index.html | 24 +- .../user_guide/evaluate/bootstrap/index.html | 24 +- .../bootstrap_point632_score/index.html | 26 +- .../user_guide/evaluate/cochrans_q/index.html | 22 +- .../evaluate/combined_ftest_5x2cv.ipynb | 4 +- .../evaluate/combined_ftest_5x2cv/index.html | 26 +- .../evaluate/confusion_matrix/index.html | 26 +- .../feature_importance_permutation/index.html | 26 +- .../site/user_guide/evaluate/ftest/index.html | 22 +- .../user_guide/evaluate/lift_score/index.html | 24 +- .../site/user_guide/evaluate/mcnemar.ipynb | 2 +- .../user_guide/evaluate/mcnemar/index.html | 26 +- .../evaluate/mcnemar_table/index.html | 22 +- .../evaluate/mcnemar_tables/index.html | 24 +- .../evaluate/paired_ttest_5x2cv/index.html | 22 +- .../evaluate/paired_ttest_kfold_cv/index.html | 22 +- .../paired_ttest_resampled/index.html | 22 +- .../evaluate/permutation_test/index.html | 24 +- .../evaluate/proportion_difference/index.html | 20 +- .../user_guide/evaluate/scoring/index.html | 22 +- .../LinearDiscriminantAnalysis/index.html | 26 +- .../PrincipalComponentAnalysis/index.html | 30 +- .../RBFKernelPCA/index.html | 34 +- .../feature_selection/ColumnSelector.ipynb | 115 ++- .../ColumnSelector/index.html | 65 +- .../ExhaustiveFeatureSelector/index.html | 34 +- .../SequentialFeatureSelector/index.html | 48 +- .../file_io/find_filegroups/index.html | 22 +- .../user_guide/file_io/find_files/index.html | 22 +- .../frequent_patterns/apriori/index.html | 14 +- .../association_rules/index.html | 18 +- .../activation-functions/index.html | 14 +- .../gradient-optimization.ipynb | 10 +- .../gradient-optimization/index.html | 30 +- .../linear-gradient-derivative/index.html | 14 +- .../regularization-linear/index.html | 22 +- .../image/extract_face_landmarks/index.html | 22 +- .../math/num_combinations/index.html | 24 +- .../math/num_permutations/index.html | 24 +- .../vectorspace_dimensionality/index.html | 22 +- 
.../vectorspace_orthonormalization/index.html | 22 +- .../plotting/category_scatter/index.html | 24 +- .../plotting/checkerboard_plot/index.html | 24 +- .../site/user_guide/plotting/ecdf/index.html | 26 +- .../plotting/enrichment_plot/index.html | 22 +- .../plotting/plot_confusion_matrix/index.html | 26 +- .../plotting/plot_decision_regions.ipynb | 102 +-- .../plotting/plot_decision_regions/index.html | 107 +-- .../plot_decision_regions_11_0.png | Bin 46568 -> 45748 bytes .../plot_decision_regions_19_0.png | Bin 69313 -> 69356 bytes .../plot_decision_regions_21_0.png | Bin 38399 -> 38363 bytes .../plot_decision_regions_23_0.png | Bin 85297 -> 85295 bytes .../plot_decision_regions_25_0.png | Bin 8462 -> 8234 bytes .../plot_decision_regions_27_0.png | Bin 18317 -> 18317 bytes .../plot_decision_regions_29_0.png | Bin 41260 -> 41260 bytes .../plotting/plot_learning_curves/index.html | 20 +- .../plot_linear_regression/index.html | 22 +- .../index.html | 16 +- .../plotting/scatterplotmatrix/index.html | 24 +- .../plotting/stacked_barplot/index.html | 22 +- .../preprocessing/CopyTransformer/index.html | 20 +- .../preprocessing/DenseTransformer/index.html | 20 +- .../preprocessing/MeanCenterer/index.html | 20 +- .../TransactionEncoder/index.html | 16 +- .../preprocessing/minmax_scaling/index.html | 20 +- .../preprocessing/one-hot_encoding/index.html | 14 +- .../shuffle_arrays_unison/index.html | 18 +- .../preprocessing/standardize/index.html | 24 +- .../regressor/LinearRegression.ipynb | 132 ++- .../regressor/LinearRegression/index.html | 105 ++- .../LinearRegression_13_1.png | Bin 0 -> 7168 bytes .../LinearRegression_15_2.png | Bin 0 -> 7125 bytes .../LinearRegression_16_0.png | Bin 0 -> 8368 bytes .../LinearRegression_18_1.png | Bin 0 -> 7093 bytes .../LinearRegression_19_0.png | Bin 0 -> 10778 bytes .../LinearRegression_21_1.png | Bin 0 -> 7136 bytes .../LinearRegression_22_0.png | Bin 0 -> 11459 bytes .../regressor/StackingCVRegressor/index.html | 26 +- .../regressor/StackingRegressor/index.html | 26 +- .../text/generalize_names/index.html | 24 +- .../text/generalize_names_duplcheck.ipynb | 4 +- .../generalize_names_duplcheck/index.html | 24 +- .../site/user_guide/text/tokenizer/index.html | 24 +- .../site/user_guide/utils/Counter/index.html | 24 +- docs/_site/sources/CHANGELOG.md | 24 + docs/_site/sources/USER_GUIDE_INDEX.md | 1 + .../mlxtend.evaluate/bias_variance_decomp.md | 57 ++ .../PrincipalComponentAnalysis.md | 11 +- .../api_modules/mlxtend.image/EyepadAlign.md | 168 ++++ .../mlxtend.image/extract_face_landmarks.md | 1 + .../sources/api_subpackages/mlxtend._base.md | 2 +- .../api_subpackages/mlxtend.classifier.md | 2 +- .../api_subpackages/mlxtend.cluster.md | 2 +- .../sources/api_subpackages/mlxtend.data.md | 2 +- .../api_subpackages/mlxtend.evaluate.md | 62 +- .../api_subpackages/mlxtend.externals.md | 2 +- .../mlxtend.feature_extraction.md | 13 +- .../mlxtend.feature_selection.md | 2 +- .../api_subpackages/mlxtend.file_io.md | 2 +- .../mlxtend.frequent_patterns.md | 2 +- .../sources/api_subpackages/mlxtend.image.md | 174 +++- .../sources/api_subpackages/mlxtend.math.md | 2 +- .../api_subpackages/mlxtend.plotting.md | 2 +- .../api_subpackages/mlxtend.preprocessing.md | 2 +- .../api_subpackages/mlxtend.regressor.md | 2 +- .../sources/api_subpackages/mlxtend.text.md | 2 +- .../sources/api_subpackages/mlxtend.utils.md | 2 +- docs/_site/sources/license.md | 2 +- .../classifier/StackingClassifier.ipynb | 9 +- .../classifier/StackingClassifier.md | 4 +- .../StackingClassifier_14_0.png | 
Bin 48974 -> 32307 bytes .../evaluate/BootstrapOutOfBag.ipynb | 2 +- .../evaluate/bias_variance_decomp.ipynb | 514 +++++++++++ .../evaluate/bias_variance_decomp.md | 371 ++++++++ .../high-bias-plot.png | Bin 0 -> 324935 bytes .../image-20181029010428686.png | Bin 0 -> 114672 bytes .../varianceplot.png | Bin 0 -> 368896 bytes .../evaluate/combined_ftest_5x2cv.ipynb | 4 +- .../evaluate/combined_ftest_5x2cv.md | 4 +- .../evaluate/confusion_matrix.ipynb | 2 +- .../sources/user_guide/evaluate/mcnemar.ipynb | 2 +- .../PrincipalComponentAnalysis.ipynb | 359 +++++++- .../PrincipalComponentAnalysis.md | 213 ++++- .../PrincipalComponentAnalysis_11_0.png | Bin 26413 -> 13742 bytes .../PrincipalComponentAnalysis_15_0.png | Bin 21425 -> 12270 bytes .../PrincipalComponentAnalysis_19_0.png | Bin 26348 -> 13742 bytes .../PrincipalComponentAnalysis_24_0.png | Bin 10062 -> 10190 bytes .../PrincipalComponentAnalysis_33_0.png | Bin 0 -> 12688 bytes .../PrincipalComponentAnalysis_37_0.png | Bin 0 -> 15260 bytes .../feature_selection/ColumnSelector.ipynb | 115 ++- .../feature_selection/ColumnSelector.md | 46 +- .../gradient-optimization.ipynb | 10 +- .../general_concepts/gradient-optimization.md | 4 +- .../user_guide/image/README_test-face.txt | 5 + .../user_guide/image/celeba-subset/000001.jpg | Bin 0 -> 11440 bytes .../user_guide/image/celeba-subset/000002.jpg | Bin 0 -> 7448 bytes .../user_guide/image/celeba-subset/000003.jpg | Bin 0 -> 4253 bytes .../user_guide/image/celeba-subset/000004.jpg | Bin 0 -> 10747 bytes .../user_guide/image/celeba-subset/000005.jpg | Bin 0 -> 6351 bytes .../user_guide/image/celeba-subset/000006.jpg | Bin 0 -> 8073 bytes .../user_guide/image/celeba-subset/000007.jpg | Bin 0 -> 8203 bytes .../user_guide/image/celeba-subset/000008.jpg | Bin 0 -> 7725 bytes .../user_guide/image/celeba-subset/000009.jpg | Bin 0 -> 8641 bytes .../image/extract_face_landmarks.ipynb | 101 ++- .../image/extract_face_landmarks.md | 70 +- .../extract_face_landmarks_12_1.png | Bin 0 -> 30843 bytes .../extract_face_landmarks_9_0.png | Bin 165381 -> 153710 bytes .../user_guide/image/eyepad_align.ipynb | 804 ++++++++++++++++++ .../sources/user_guide/image/eyepad_align.md | 509 +++++++++++ .../eyepad_align_files/eyepad_align_11_0.png | Bin 0 -> 288233 bytes .../eyepad_align_files/eyepad_align_16_0.png | Bin 0 -> 44776 bytes .../eyepad_align_files/eyepad_align_18_0.png | Bin 0 -> 189307 bytes .../eyepad_align_files/eyepad_align_22_1.png | Bin 0 -> 189779 bytes .../eyepad_align_files/eyepad_align_27_0.png | Bin 0 -> 263921 bytes docs/_site/sources/user_guide/image/lena.png | Bin 479778 -> 0 bytes .../sources/user_guide/image/test-face.png | Bin 0 -> 232994 bytes .../plotting/plot_decision_regions.ipynb | 102 +-- .../plotting/plot_decision_regions.md | 76 +- .../plot_decision_regions_11_0.png | Bin 46568 -> 45748 bytes .../plot_decision_regions_19_0.png | Bin 69313 -> 69356 bytes .../plot_decision_regions_21_0.png | Bin 38399 -> 38363 bytes .../plot_decision_regions_23_0.png | Bin 85297 -> 85295 bytes .../plot_decision_regions_25_0.png | Bin 8462 -> 8234 bytes .../plot_decision_regions_27_0.png | Bin 18317 -> 18317 bytes .../plot_decision_regions_29_0.png | Bin 41260 -> 41260 bytes .../regressor/LinearRegression.ipynb | 132 ++- .../user_guide/regressor/LinearRegression.md | 75 +- .../LinearRegression_13_1.png | Bin 0 -> 7168 bytes .../LinearRegression_15_2.png | Bin 0 -> 7125 bytes .../LinearRegression_16_0.png | Bin 0 -> 8368 bytes .../LinearRegression_18_1.png | Bin 0 -> 7093 bytes .../LinearRegression_19_0.png | 
Bin 0 -> 10778 bytes .../LinearRegression_21_1.png | Bin 0 -> 7136 bytes .../LinearRegression_22_0.png | Bin 0 -> 11459 bytes .../text/generalize_names_duplcheck.ipynb | 4 +- .../text/generalize_names_duplcheck.md | 2 +- docs/ipynb2markdown.py | 2 +- docs/make_api.py | 2 +- docs/make_userguide.py | 2 +- docs/md2pdf.py | 2 +- docs/mkdocs.yml | 2 +- docs/sources/CHANGELOG.md | 2 +- docs/sources/license.md | 2 +- .../PrincipalComponentAnalysis_11_0.png | Bin 26413 -> 13742 bytes .../PrincipalComponentAnalysis_15_0.png | Bin 21425 -> 12270 bytes .../PrincipalComponentAnalysis_19_0.png | Bin 26348 -> 13742 bytes .../PrincipalComponentAnalysis_24_0.png | Bin 10062 -> 10190 bytes .../PrincipalComponentAnalysis_33_0.png | Bin 0 -> 12688 bytes .../PrincipalComponentAnalysis_37_0.png | Bin 0 -> 15260 bytes .../extract_face_landmarks_12_1.png | Bin 0 -> 30843 bytes .../extract_face_landmarks_9_0.png | Bin 165381 -> 153710 bytes .../eyepad_align_files/eyepad_align_11_0.png | Bin 0 -> 288233 bytes .../eyepad_align_files/eyepad_align_16_0.png | Bin 0 -> 44776 bytes .../eyepad_align_files/eyepad_align_18_0.png | Bin 0 -> 189307 bytes .../eyepad_align_files/eyepad_align_22_1.png | Bin 0 -> 189779 bytes .../eyepad_align_files/eyepad_align_27_0.png | Bin 0 -> 263921 bytes mlxtend/__init__.py | 4 +- mlxtend/_base/__init__.py | 2 +- mlxtend/_base/_base_model.py | 2 +- mlxtend/_base/_classifier.py | 2 +- mlxtend/_base/_cluster.py | 2 +- mlxtend/_base/_iterative_model.py | 2 +- mlxtend/_base/_multiclass.py | 2 +- mlxtend/_base/_multilayer.py | 2 +- mlxtend/_base/_regressor.py | 2 +- mlxtend/_base/oldtests/test_base_estimator.py | 2 +- mlxtend/_base/tests/test_base_model.py | 2 +- mlxtend/_base/tests/test_classifier.py | 2 +- mlxtend/_base/tests/test_cluster.py | 2 +- mlxtend/_base/tests/test_iterative_model.py | 2 +- mlxtend/_base/tests/test_multiclass.py | 2 +- mlxtend/_base/tests/test_multilayer.py | 2 +- mlxtend/_base/tests/test_regressor.py | 2 +- mlxtend/classifier/__init__.py | 2 +- mlxtend/classifier/adaline.py | 2 +- mlxtend/classifier/ensemble_vote.py | 2 +- mlxtend/classifier/logistic_regression.py | 2 +- mlxtend/classifier/multilayerperceptron.py | 2 +- mlxtend/classifier/perceptron.py | 2 +- mlxtend/classifier/softmax_regression.py | 2 +- mlxtend/classifier/stacking_classification.py | 2 +- .../classifier/stacking_cv_classification.py | 2 +- mlxtend/classifier/tests/test_adaline.py | 2 +- .../tests/test_ensemble_vote_classifier.py | 2 +- .../tests/test_logistic_regression.py | 2 +- .../tests/test_multilayerperceptron.py | 2 +- mlxtend/classifier/tests/test_perceptron.py | 2 +- .../tests/test_softmax_regression.py | 2 +- .../tests/test_stacking_classifier.py | 2 +- .../tests/test_stacking_cv_classifier.py | 2 +- mlxtend/cluster/__init__.py | 2 +- mlxtend/cluster/kmeans.py | 2 +- mlxtend/cluster/tests/test_kmeans.py | 2 +- mlxtend/data/__init__.py | 2 +- mlxtend/data/autompg.py | 2 +- mlxtend/data/boston_housing.py | 2 +- mlxtend/data/iris.py | 2 +- mlxtend/data/local_mnist.py | 2 +- mlxtend/data/mnist.py | 2 +- mlxtend/data/multiplexer.py | 2 +- mlxtend/data/tests/test_datasets.py | 2 +- mlxtend/data/tests/test_multiplexer.py | 2 +- mlxtend/data/three_blobs.py | 2 +- mlxtend/data/wine.py | 2 +- mlxtend/evaluate/__init__.py | 2 +- mlxtend/evaluate/bias_variance_decomp.py | 2 +- mlxtend/evaluate/bootstrap.py | 2 +- mlxtend/evaluate/bootstrap_outofbag.py | 2 +- mlxtend/evaluate/bootstrap_point632.py | 2 +- mlxtend/evaluate/cochrans_q.py | 2 +- mlxtend/evaluate/confusion_matrix.py | 2 +- 
mlxtend/evaluate/f_test.py | 2 +- mlxtend/evaluate/feature_importance.py | 2 +- mlxtend/evaluate/holdout.py | 2 +- mlxtend/evaluate/mcnemar.py | 2 +- mlxtend/evaluate/permutation.py | 2 +- mlxtend/evaluate/proportion_difference.py | 2 +- mlxtend/evaluate/scoring.py | 2 +- .../tests/test_bias_variance_decomp.py | 2 +- mlxtend/evaluate/tests/test_bootstrap.py | 2 +- .../evaluate/tests/test_bootstrap_outofbag.py | 2 +- .../evaluate/tests/test_bootstrap_point632.py | 2 +- mlxtend/evaluate/tests/test_cochran_q.py | 2 +- .../tests/test_combined_ftest_5x2cv.py | 2 +- .../evaluate/tests/test_confusion_matrix.py | 2 +- mlxtend/evaluate/tests/test_f_test.py | 2 +- .../evaluate/tests/test_feature_importance.py | 2 +- mlxtend/evaluate/tests/test_holdout.py | 2 +- mlxtend/evaluate/tests/test_mcnemar_table.py | 2 +- mlxtend/evaluate/tests/test_mcnemar_tables.py | 2 +- mlxtend/evaluate/tests/test_mcnemar_test.py | 2 +- .../evaluate/tests/test_paired_ttest_5x2cv.py | 2 +- .../evaluate/tests/test_paired_ttest_kfold.py | 2 +- .../tests/test_paired_ttest_resampled.py | 2 +- mlxtend/evaluate/tests/test_permutation.py | 2 +- .../tests/test_proportion_difference.py | 2 +- mlxtend/evaluate/tests/test_scoring.py | 2 +- mlxtend/evaluate/ttest.py | 2 +- mlxtend/externals/__init__.py | 2 +- mlxtend/feature_extraction/__init__.py | 2 +- mlxtend/feature_extraction/base.py | 2 +- .../linear_discriminant_analysis.py | 2 +- .../principal_component_analysis.py | 2 +- mlxtend/feature_extraction/rbf_kernel_pca.py | 2 +- mlxtend/feature_extraction/tests/test_base.py | 2 +- .../tests/test_kernel_pca.py | 2 +- .../test_linear_discriminant_analysis.py | 2 +- .../test_principal_component_analysis.py | 2 +- mlxtend/feature_selection/__init__.py | 2 +- mlxtend/feature_selection/column_selector.py | 2 +- .../exhaustive_feature_selector.py | 2 +- .../sequential_feature_selector.py | 2 +- .../tests/test_column_selector.py | 2 +- .../tests/test_exhaustive_feature_selector.py | 2 +- .../tests/test_sequential_feature_selector.py | 2 +- mlxtend/file_io/__init__.py | 2 +- mlxtend/file_io/find_filegroups.py | 2 +- mlxtend/file_io/find_files.py | 2 +- mlxtend/frequent_patterns/__init__.py | 2 +- mlxtend/frequent_patterns/apriori.py | 2 +- .../frequent_patterns/association_rules.py | 2 +- .../frequent_patterns/tests/test_apriori.py | 2 +- mlxtend/image/__init__.py | 2 +- mlxtend/image/extract_face_landmarks.py | 2 +- mlxtend/image/eyepad_align.py | 2 +- .../tests/test_extract_face_landmarks.py | 2 +- mlxtend/image/tests/test_eyepad_align.py | 2 +- mlxtend/image/utils.py | 2 +- mlxtend/math/__init__.py | 2 +- mlxtend/math/counting.py | 2 +- mlxtend/math/linalg.py | 2 +- mlxtend/math/tests/test_counting.py | 2 +- mlxtend/math/tests/test_linalg.py | 2 +- mlxtend/plotting/__init__.py | 2 +- mlxtend/plotting/checkerboard.py | 2 +- mlxtend/plotting/decision_regions.py | 2 +- mlxtend/plotting/ecdf.py | 2 +- mlxtend/plotting/enrichment_plot.py | 2 +- mlxtend/plotting/learning_curves.py | 2 +- mlxtend/plotting/plot_confusion_matrix.py | 2 +- mlxtend/plotting/plot_linear_regression.py | 2 +- .../plot_sequential_feature_selection.py | 2 +- mlxtend/plotting/remove_chartjunk.py | 2 +- mlxtend/plotting/scatter.py | 2 +- mlxtend/plotting/scatterplotmatrix.py | 2 +- mlxtend/plotting/stacked_barplot.py | 2 +- mlxtend/plotting/tests/test_checkerboard.py | 2 +- .../plotting/tests/test_decision_regions.py | 2 +- mlxtend/plotting/tests/test_ecdf.py | 2 +- .../plotting/tests/test_learning_curves.py | 2 +- mlxtend/preprocessing/__init__.py | 2 +- 
mlxtend/preprocessing/copy_transformer.py | 2 +- mlxtend/preprocessing/dense_transformer.py | 2 +- mlxtend/preprocessing/mean_centering.py | 2 +- mlxtend/preprocessing/onehot.py | 2 +- mlxtend/preprocessing/scaling.py | 2 +- mlxtend/preprocessing/shuffle.py | 2 +- .../tests/test__scaling__minmax_scaling.py | 2 +- .../tests/test__scaling__standardizing.py | 2 +- .../tests/test_copy_transformer.py | 2 +- .../tests/test_dense_transformer.py | 2 +- .../tests/test_mean_centering.py | 2 +- mlxtend/preprocessing/tests/test_onehot.py | 2 +- .../test_shuffle_shuffle_arrays_unison.py | 2 +- .../tests/test_transactionencoder.py | 2 +- mlxtend/preprocessing/transactionencoder.py | 2 +- mlxtend/regressor/__init__.py | 2 +- mlxtend/regressor/linear_regression.py | 2 +- mlxtend/regressor/stacking_cv_regression.py | 2 +- mlxtend/regressor/stacking_regression.py | 2 +- .../regressor/tests/test_linear_regression.py | 2 +- .../tests/test_stacking_cv_regression.py | 2 +- .../tests/test_stacking_regression.py | 2 +- mlxtend/text/__init__.py | 2 +- mlxtend/text/names.py | 2 +- mlxtend/text/tokenizer.py | 2 +- mlxtend/utils/__init__.py | 2 +- mlxtend/utils/checking.py | 2 +- mlxtend/utils/counter.py | 2 +- mlxtend/utils/testing.py | 2 +- mlxtend/utils/tests/test_checking_inputs.py | 2 +- mlxtend/utils/tests/test_counter.py | 2 +- mlxtend/utils/tests/test_testing.py | 2 +- 550 files changed, 7005 insertions(+), 1929 deletions(-) rename docs/_site/{cinder => custom_theme}/404.html (100%) rename docs/_site/{cinder => custom_theme}/base.html (100%) rename docs/_site/{cinder => custom_theme}/content.html (100%) rename docs/_site/{cinder => custom_theme}/css/base.css (99%) rename docs/_site/{cinder => custom_theme}/css/bootstrap-custom.css (100%) rename docs/_site/{cinder => custom_theme}/css/bootstrap-custom.min.css (100%) rename docs/_site/{cinder => custom_theme}/css/cinder.css (90%) rename docs/_site/{cinder => custom_theme}/css/font-awesome-4.0.3.css (100%) rename docs/_site/{cinder => custom_theme}/css/highlight.css (100%) rename docs/_site/{cinder => custom_theme}/fonts/fontawesome-webfont.eot (100%) rename docs/_site/{cinder => custom_theme}/fonts/fontawesome-webfont.svg (100%) rename docs/_site/{cinder => custom_theme}/fonts/fontawesome-webfont.ttf (100%) rename docs/_site/{cinder => custom_theme}/fonts/fontawesome-webfont.woff (100%) rename docs/_site/{cinder => custom_theme}/img/favicon.ico (100%) rename docs/_site/{cinder => custom_theme}/img/grid1.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid10.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid11.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid12.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid13.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid14.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid15.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid16.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid17.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid18.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid19.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid2.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid20.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid3.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid4.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid5.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid6.png (100%) rename docs/_site/{cinder => 
custom_theme}/img/grid7.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid8.png (100%) rename docs/_site/{cinder => custom_theme}/img/grid9.png (100%) rename docs/_site/{cinder => custom_theme}/js/base.js (100%) rename docs/_site/{cinder => custom_theme}/js/bootstrap-3.0.3.min.js (100%) rename docs/_site/{cinder => custom_theme}/js/highlight.pack.js (100%) rename docs/_site/{cinder => custom_theme}/js/jquery-1.10.2.min.js (100%) rename docs/_site/{cinder => custom_theme}/main.html (100%) rename docs/_site/{cinder => custom_theme}/mkdocs_theme.yml (100%) rename docs/_site/{cinder => custom_theme}/nav-sub.html (100%) rename docs/_site/{cinder => custom_theme}/nav.html (95%) rename docs/_site/{cinder => custom_theme}/toc.html (85%) create mode 100644 docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_13_1.png create mode 100644 docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_15_2.png create mode 100644 docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_16_0.png create mode 100644 docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_18_1.png create mode 100644 docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_19_0.png create mode 100644 docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_21_1.png create mode 100644 docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_22_0.png create mode 100644 docs/_site/sources/api_modules/mlxtend.evaluate/bias_variance_decomp.md create mode 100644 docs/_site/sources/api_modules/mlxtend.image/EyepadAlign.md create mode 100644 docs/_site/sources/user_guide/evaluate/bias_variance_decomp.ipynb create mode 100644 docs/_site/sources/user_guide/evaluate/bias_variance_decomp.md create mode 100644 docs/_site/sources/user_guide/evaluate/bias_variance_decomp_files/high-bias-plot.png create mode 100644 docs/_site/sources/user_guide/evaluate/bias_variance_decomp_files/image-20181029010428686.png create mode 100644 docs/_site/sources/user_guide/evaluate/bias_variance_decomp_files/varianceplot.png create mode 100644 docs/_site/sources/user_guide/feature_extraction/PrincipalComponentAnalysis_files/PrincipalComponentAnalysis_33_0.png create mode 100644 docs/_site/sources/user_guide/feature_extraction/PrincipalComponentAnalysis_files/PrincipalComponentAnalysis_37_0.png create mode 100644 docs/_site/sources/user_guide/image/README_test-face.txt create mode 100644 docs/_site/sources/user_guide/image/celeba-subset/000001.jpg create mode 100644 docs/_site/sources/user_guide/image/celeba-subset/000002.jpg create mode 100644 docs/_site/sources/user_guide/image/celeba-subset/000003.jpg create mode 100644 docs/_site/sources/user_guide/image/celeba-subset/000004.jpg create mode 100644 docs/_site/sources/user_guide/image/celeba-subset/000005.jpg create mode 100644 docs/_site/sources/user_guide/image/celeba-subset/000006.jpg create mode 100644 docs/_site/sources/user_guide/image/celeba-subset/000007.jpg create mode 100644 docs/_site/sources/user_guide/image/celeba-subset/000008.jpg create mode 100644 docs/_site/sources/user_guide/image/celeba-subset/000009.jpg create mode 100644 docs/_site/sources/user_guide/image/extract_face_landmarks_files/extract_face_landmarks_12_1.png create mode 100644 docs/_site/sources/user_guide/image/eyepad_align.ipynb create mode 100644 docs/_site/sources/user_guide/image/eyepad_align.md create mode 100644 
docs/_site/sources/user_guide/image/eyepad_align_files/eyepad_align_11_0.png create mode 100644 docs/_site/sources/user_guide/image/eyepad_align_files/eyepad_align_16_0.png create mode 100644 docs/_site/sources/user_guide/image/eyepad_align_files/eyepad_align_18_0.png create mode 100644 docs/_site/sources/user_guide/image/eyepad_align_files/eyepad_align_22_1.png create mode 100644 docs/_site/sources/user_guide/image/eyepad_align_files/eyepad_align_27_0.png delete mode 100644 docs/_site/sources/user_guide/image/lena.png create mode 100644 docs/_site/sources/user_guide/image/test-face.png create mode 100644 docs/_site/sources/user_guide/regressor/LinearRegression_files/LinearRegression_13_1.png create mode 100644 docs/_site/sources/user_guide/regressor/LinearRegression_files/LinearRegression_15_2.png create mode 100644 docs/_site/sources/user_guide/regressor/LinearRegression_files/LinearRegression_16_0.png create mode 100644 docs/_site/sources/user_guide/regressor/LinearRegression_files/LinearRegression_18_1.png create mode 100644 docs/_site/sources/user_guide/regressor/LinearRegression_files/LinearRegression_19_0.png create mode 100644 docs/_site/sources/user_guide/regressor/LinearRegression_files/LinearRegression_21_1.png create mode 100644 docs/_site/sources/user_guide/regressor/LinearRegression_files/LinearRegression_22_0.png create mode 100644 docs/sources/user_guide/feature_extraction/PrincipalComponentAnalysis_files/PrincipalComponentAnalysis_33_0.png create mode 100644 docs/sources/user_guide/feature_extraction/PrincipalComponentAnalysis_files/PrincipalComponentAnalysis_37_0.png create mode 100644 docs/sources/user_guide/image/extract_face_landmarks_files/extract_face_landmarks_12_1.png create mode 100644 docs/sources/user_guide/image/eyepad_align_files/eyepad_align_11_0.png create mode 100644 docs/sources/user_guide/image/eyepad_align_files/eyepad_align_16_0.png create mode 100644 docs/sources/user_guide/image/eyepad_align_files/eyepad_align_18_0.png create mode 100644 docs/sources/user_guide/image/eyepad_align_files/eyepad_align_22_1.png create mode 100644 docs/sources/user_guide/image/eyepad_align_files/eyepad_align_27_0.png diff --git a/README.md b/README.md index 0e82f0f68..561563bf4 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@
-Sebastian Raschka 2014-2018
+Sebastian Raschka 2014-2019
[The cinder => custom_theme stanzas that are pure renames (similarity index 100%), covering the theme's HTML templates, Bootstrap and Font Awesome assets, grid images, and JavaScript, carry no hunks and are already itemized in the rename summary above. The four theme files with content changes (base.css, cinder.css, nav.html, toc.html) follow.]
diff --git a/docs/_site/cinder/css/base.css b/docs/_site/custom_theme/css/base.css
similarity index 99%
rename from docs/_site/cinder/css/base.css
rename to docs/_site/custom_theme/css/base.css
index eb8e296fc..923228600 100755
--- a/docs/_site/cinder/css/base.css
+++ b/docs/_site/custom_theme/css/base.css
@@ -5,7 +5,7 @@ body {
 h1[id]:before, h2[id]:before, h3[id]:before, h4[id]:before, h5[id]:before, h6[id]:before {
   content: "";
   display: block;
-  margin-top: -75px;
+  margin-top: 0px;
   height: 75px;
 }
diff --git a/docs/_site/cinder/css/cinder.css b/docs/_site/custom_theme/css/cinder.css
similarity index 90%
rename from docs/_site/cinder/css/cinder.css
rename to docs/_site/custom_theme/css/cinder.css
index ce3cb62b2..47fbb9897 100755
--- a/docs/_site/cinder/css/cinder.css
+++ b/docs/_site/custom_theme/css/cinder.css
@@ -80,3 +80,15 @@ pre {
 footer > hr {
   width: 35%;
 }
+
+
+/*
+  modified dropdown menu, Sebastian Raschka 2016
+*/
+
+.dropdown-menu > li > a {
+  display: block;
+  padding: 5px 20px;
+  clear: both;
+  font-weight: normal;
+  line-height: 1.3;
+  color: #333333;
+  white-space: nowrap;
+}
diff --git a/docs/_site/cinder/nav.html b/docs/_site/custom_theme/nav.html
similarity index 95%
rename from docs/_site/cinder/nav.html
rename to docs/_site/custom_theme/nav.html
index 03469d111..3ab01cea1 100755
--- a/docs/_site/cinder/nav.html
+++ b/docs/_site/custom_theme/nav.html
@@ -55,7 +55,7 @@
 {%- endif %}
 {%- endblock %}
-{%- block next_prev %}
+{%- block repo %}
 {%- if page and page.edit_url %}
[Second nav.html hunk; the surrounding list and anchor markup was lost when the patch was flattened. The recoverable change shortens the link label in the GitHub branch:]
 {%- if config.repo_name == 'GitHub' -%}
-Edit on {{ config.repo_name }}
+GitHub
 {%- elif config.repo_name == 'Bitbucket' -%}
 Edit on {{ config.repo_name }}
 {%- elif config.repo_name == 'GitLab' -%}
diff --git a/docs/_site/cinder/toc.html b/docs/_site/custom_theme/toc.html
similarity index 85%
rename from docs/_site/cinder/toc.html
rename to docs/_site/custom_theme/toc.html
index 26519c568..a8adf20ff 100755
--- a/docs/_site/cinder/toc.html
+++ b/docs/_site/custom_theme/toc.html
@@ -4,9 +4,9 @@
[toc.html hunk; the list markup was lost when the patch was flattened. The recoverable change closes the nested children loop with "{%- endfor %}" in place of a stray second "{% for toc_item in toc_item.children %}":]
 {{ toc_item.title }}
 {%- for toc_item in toc_item.children %}
 {{ toc_item.title }}
-{% for toc_item in toc_item.children %}
+{%- endfor %}
 {%- endfor %}
diff --git a/docs/_site/ipynb2markdown.py b/docs/_site/ipynb2markdown.py
index dfaae57ef..5e18e62cf 100644
--- a/docs/_site/ipynb2markdown.py
+++ b/docs/_site/ipynb2markdown.py
@@ -1,6 +1,6 @@
 # IPython Notebook to Markdown conversion script
 #
-# Sebastian Raschka 2014-2018
+# Sebastian Raschka 2014-2019
 # mlxtend Machine Learning Library Extensions
 #
 # Author: Sebastian Raschka
diff --git a/docs/_site/make_api.py b/docs/_site/make_api.py
index ab50195e7..375570d35 100644
--- a/docs/_site/make_api.py
+++ b/docs/_site/make_api.py
@@ -1,6 +1,6 @@
 # API generator script
 #
-# Sebastian Raschka 2014-2018
+# Sebastian Raschka 2014-2019
 # mlxtend Machine Learning Library Extensions
 #
 # Author: Sebastian Raschka
@@ -8,7 +8,6 @@
 # License: BSD 3 clause

-import string
 import inspect
 import os
 import sys
diff --git a/docs/_site/make_userguide.py b/docs/_site/make_userguide.py
index 19176b333..cc51f7662 100644
--- a/docs/_site/make_userguide.py
+++ b/docs/_site/make_userguide.py
@@ -1,6 +1,6 @@
 # API generator script
 #
-# Sebastian Raschka 2014-2018
+# Sebastian Raschka 2014-2019
 # mlxtend Machine Learning Library Extensions
 #
 # Author: Sebastian Raschka
diff --git a/docs/_site/md2pdf.py b/docs/_site/md2pdf.py
index 8e48ed3d9..54dcb8551 100644
--- a/docs/_site/md2pdf.py
+++ b/docs/_site/md2pdf.py
@@ -1,6 +1,6 @@
 # API generator script
 #
-# Sebastian Raschka 2014-2018
+# Sebastian Raschka 2014-2019
 # mlxtend Machine Learning Library Extensions
 #
 # Author: Sebastian Raschka
diff --git a/docs/_site/mkdocs.yml b/docs/_site/mkdocs.yml
index 67e8e85a1..f6fa8ab29 100755
--- a/docs/_site/mkdocs.yml
+++ b/docs/_site/mkdocs.yml
@@ -5,12 +5,14 @@ site_description: A library consisting of useful tools and extensions for the da
 repo_url: https://github.com/rasbt/mlxtend
 repo_name: GitHub
+edit_uri: docs/sources/user_guide/

 #include_search: true # not necessary for this theme
 docs_dir: sources

 theme:
-    name: cosmo
+    name: null
+    custom_dir: 'custom_theme/'

 markdown_extensions:
   - tables
@@ -28,7 +30,7 @@ extra_css:
   - cinder/css/font-awesome-4.0.3.css
   - cinder/css/highlight.css

-copyright: Copyright © 2014-2018 Sebastian Raschka
+copyright: Copyright © 2014-2019 Sebastian Raschka
 google_analytics: ['UA-38457794-2', 'rasbt.github.io/mlxtend/']

 nav:
@@ -96,6 +98,7 @@
   - user_guide/general_concepts/regularization-linear.md
   - image:
     - user_guide/image/extract_face_landmarks.md
+    - user_guide/image/eyepad_align.md
   - math:
     - user_guide/math/num_combinations.md
     - user_guide/math/num_permutations.md
@@ -141,6 +144,7 @@
   - api_subpackages/mlxtend.feature_selection.md
   - api_subpackages/mlxtend.file_io.md
   - api_subpackages/mlxtend.frequent_patterns.md
+  - api_subpackages/mlxtend.image.md
   - api_subpackages/mlxtend.plotting.md
   - api_subpackages/mlxtend.preprocessing.md
   - api_subpackages/mlxtend.regressor.md
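[Note: a minimal sketch of the theme section of docs/_site/mkdocs.yml after the hunk above, assuming standard two-space YAML indentation, since the flattened patch does not preserve whitespace:

  theme:
    # "name: null" tells MkDocs not to load any packaged theme, so the
    # directory given by custom_dir must supply a complete set of templates.
    name: null
    custom_dir: 'custom_theme/'

This is also why the cinder/ theme files are renamed to custom_theme/ in this patch rather than deleted: with a null theme name, MkDocs serves the templates, CSS, fonts, and JavaScript directly from custom_dir.]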
[The remaining hunks shown in this patch update the regenerated HTML pages under docs/_site/site/ (CHANGELOG, CONTRIBUTING, USER_GUIDE_INDEX, and the api_modules reference pages). Every page receives the same changes, produced by the theme and mkdocs.yml edits above: a new "Mlxtend.image" entry in the navigation sidebar, the "Edit on GitHub" link shortened to "GitHub", and the footer copyright bumped from "Copyright © 2014-2018 Sebastian Raschka" to "Copyright © 2014-2019 Sebastian Raschka". The HTML markup of these hunks was lost when the patch was flattened, and the excerpt is truncated partway through docs/_site/site/api_modules/mlxtend.evaluate/lift_score/index.html.]
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -962,7 +970,7 @@

    lift_score

    diff --git a/docs/_site/site/api_modules/mlxtend.evaluate/mcnemar/index.html b/docs/_site/site/api_modules/mlxtend.evaluate/mcnemar/index.html index efe418ce1..d8b279835 100644 --- a/docs/_site/site/api_modules/mlxtend.evaluate/mcnemar/index.html +++ b/docs/_site/site/api_modules/mlxtend.evaluate/mcnemar/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -961,7 +969,7 @@

    mcnemar

    diff --git a/docs/_site/site/api_modules/mlxtend.evaluate/mcnemar_table/index.html b/docs/_site/site/api_modules/mlxtend.evaluate/mcnemar_table/index.html index ff3b38469..d858aaeaf 100644 --- a/docs/_site/site/api_modules/mlxtend.evaluate/mcnemar_table/index.html +++ b/docs/_site/site/api_modules/mlxtend.evaluate/mcnemar_table/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -954,7 +962,7 @@

    mcnemar_table

    diff --git a/docs/_site/site/api_modules/mlxtend.evaluate/mcnemar_tables/index.html b/docs/_site/site/api_modules/mlxtend.evaluate/mcnemar_tables/index.html index 2d2503baa..130bf2b4f 100644 --- a/docs/_site/site/api_modules/mlxtend.evaluate/mcnemar_tables/index.html +++ b/docs/_site/site/api_modules/mlxtend.evaluate/mcnemar_tables/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -974,7 +982,7 @@

    mcnemar_tables

    diff --git a/docs/_site/site/api_modules/mlxtend.evaluate/paired_ttest_5x2cv/index.html b/docs/_site/site/api_modules/mlxtend.evaluate/paired_ttest_5x2cv/index.html index 615844fe4..736d494d5 100644 --- a/docs/_site/site/api_modules/mlxtend.evaluate/paired_ttest_5x2cv/index.html +++ b/docs/_site/site/api_modules/mlxtend.evaluate/paired_ttest_5x2cv/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -980,7 +988,7 @@

    paired_ttest_5x2cv

    diff --git a/docs/_site/site/api_modules/mlxtend.evaluate/paired_ttest_kfold_cv/index.html b/docs/_site/site/api_modules/mlxtend.evaluate/paired_ttest_kfold_cv/index.html index 0160aa2d3..3d2efac59 100644 --- a/docs/_site/site/api_modules/mlxtend.evaluate/paired_ttest_kfold_cv/index.html +++ b/docs/_site/site/api_modules/mlxtend.evaluate/paired_ttest_kfold_cv/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -991,7 +999,7 @@

    paired_ttest_kfold_cv

    diff --git a/docs/_site/site/api_modules/mlxtend.evaluate/paired_ttest_resampled/index.html b/docs/_site/site/api_modules/mlxtend.evaluate/paired_ttest_resampled/index.html index 759a5765a..181fa49b0 100644 --- a/docs/_site/site/api_modules/mlxtend.evaluate/paired_ttest_resampled/index.html +++ b/docs/_site/site/api_modules/mlxtend.evaluate/paired_ttest_resampled/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -992,7 +1000,7 @@

    paired_ttest_resampled

    diff --git a/docs/_site/site/api_modules/mlxtend.evaluate/permutation_test/index.html b/docs/_site/site/api_modules/mlxtend.evaluate/permutation_test/index.html index 2f0a1d06e..c655f6671 100644 --- a/docs/_site/site/api_modules/mlxtend.evaluate/permutation_test/index.html +++ b/docs/_site/site/api_modules/mlxtend.evaluate/permutation_test/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -973,7 +981,7 @@

    permutation_test

    diff --git a/docs/_site/site/api_modules/mlxtend.evaluate/proportion_difference/index.html b/docs/_site/site/api_modules/mlxtend.evaluate/proportion_difference/index.html index b4fbe9278..427919971 100644 --- a/docs/_site/site/api_modules/mlxtend.evaluate/proportion_difference/index.html +++ b/docs/_site/site/api_modules/mlxtend.evaluate/proportion_difference/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -956,7 +964,7 @@

    proportion_difference

    diff --git a/docs/_site/site/api_modules/mlxtend.evaluate/scoring/index.html b/docs/_site/site/api_modules/mlxtend.evaluate/scoring/index.html index dbd64d671..d403e234a 100644 --- a/docs/_site/site/api_modules/mlxtend.evaluate/scoring/index.html +++ b/docs/_site/site/api_modules/mlxtend.evaluate/scoring/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -974,7 +982,7 @@

    scoring

    diff --git a/docs/_site/site/api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/index.html b/docs/_site/site/api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/index.html index d410b4edb..12a0b24bb 100644 --- a/docs/_site/site/api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/index.html +++ b/docs/_site/site/api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -912,7 +920,7 @@ @@ -912,7 +920,7 @@ @@ -912,7 +920,7 @@ @@ -912,7 +920,7 @@
    @@ -1039,7 +1047,7 @@

    Methods

    diff --git a/docs/_site/site/api_modules/mlxtend.feature_selection/ExhaustiveFeatureSelector/index.html b/docs/_site/site/api_modules/mlxtend.feature_selection/ExhaustiveFeatureSelector/index.html index 208651011..14bdf5de4 100644 --- a/docs/_site/site/api_modules/mlxtend.feature_selection/ExhaustiveFeatureSelector/index.html +++ b/docs/_site/site/api_modules/mlxtend.feature_selection/ExhaustiveFeatureSelector/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -912,7 +920,7 @@
    @@ -1153,7 +1161,7 @@

    Methods

    diff --git a/docs/_site/site/api_modules/mlxtend.feature_selection/SequentialFeatureSelector/index.html b/docs/_site/site/api_modules/mlxtend.feature_selection/SequentialFeatureSelector/index.html index f7d45c4d9..917b55c2b 100644 --- a/docs/_site/site/api_modules/mlxtend.feature_selection/SequentialFeatureSelector/index.html +++ b/docs/_site/site/api_modules/mlxtend.feature_selection/SequentialFeatureSelector/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -912,7 +920,7 @@
    @@ -1177,7 +1185,7 @@

    Methods

    diff --git a/docs/_site/site/api_modules/mlxtend.file_io/find_filegroups/index.html b/docs/_site/site/api_modules/mlxtend.file_io/find_filegroups/index.html index 709103158..1477f32cd 100644 --- a/docs/_site/site/api_modules/mlxtend.file_io/find_filegroups/index.html +++ b/docs/_site/site/api_modules/mlxtend.file_io/find_filegroups/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -977,7 +985,7 @@

    find_filegroups

    diff --git a/docs/_site/site/api_modules/mlxtend.file_io/find_files/index.html b/docs/_site/site/api_modules/mlxtend.file_io/find_files/index.html index affb785bd..1f697ba9a 100644 --- a/docs/_site/site/api_modules/mlxtend.file_io/find_files/index.html +++ b/docs/_site/site/api_modules/mlxtend.file_io/find_files/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -964,7 +972,7 @@

    find_files

    diff --git a/docs/_site/site/api_modules/mlxtend.frequent_patterns/apriori/index.html b/docs/_site/site/api_modules/mlxtend.frequent_patterns/apriori/index.html index 306ad7342..c91827374 100644 --- a/docs/_site/site/api_modules/mlxtend.frequent_patterns/apriori/index.html +++ b/docs/_site/site/api_modules/mlxtend.frequent_patterns/apriori/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -975,7 +983,7 @@

    apriori

    diff --git a/docs/_site/site/api_modules/mlxtend.frequent_patterns/association_rules/index.html b/docs/_site/site/api_modules/mlxtend.frequent_patterns/association_rules/index.html index dd6eabb18..1991dfd5c 100644 --- a/docs/_site/site/api_modules/mlxtend.frequent_patterns/association_rules/index.html +++ b/docs/_site/site/api_modules/mlxtend.frequent_patterns/association_rules/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -988,7 +996,7 @@

    association_rules

    diff --git a/docs/_site/site/api_modules/mlxtend.image/extract_face_landmarks/index.html b/docs/_site/site/api_modules/mlxtend.image/extract_face_landmarks/index.html index f766cfcd4..2f1e2cd44 100644 --- a/docs/_site/site/api_modules/mlxtend.image/extract_face_landmarks/index.html +++ b/docs/_site/site/api_modules/mlxtend.image/extract_face_landmarks/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -952,7 +960,7 @@

    extract_face_landmarks

    diff --git a/docs/_site/site/api_modules/mlxtend.math/factorial/index.html b/docs/_site/site/api_modules/mlxtend.math/factorial/index.html index 0b54bc83c..583c6e949 100644 --- a/docs/_site/site/api_modules/mlxtend.math/factorial/index.html +++ b/docs/_site/site/api_modules/mlxtend.math/factorial/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -925,7 +933,7 @@

    factorial

    diff --git a/docs/_site/site/api_modules/mlxtend.math/num_combinations/index.html b/docs/_site/site/api_modules/mlxtend.math/num_combinations/index.html index b82c358f1..c6ce142f7 100644 --- a/docs/_site/site/api_modules/mlxtend.math/num_combinations/index.html +++ b/docs/_site/site/api_modules/mlxtend.math/num_combinations/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -950,7 +958,7 @@

    num_combinations

    diff --git a/docs/_site/site/api_modules/mlxtend.math/num_permutations/index.html b/docs/_site/site/api_modules/mlxtend.math/num_permutations/index.html index a637fd6b9..1636ba9cf 100644 --- a/docs/_site/site/api_modules/mlxtend.math/num_permutations/index.html +++ b/docs/_site/site/api_modules/mlxtend.math/num_permutations/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -950,7 +958,7 @@

    num_permutations

    diff --git a/docs/_site/site/api_modules/mlxtend.math/vectorspace_dimensionality/index.html b/docs/_site/site/api_modules/mlxtend.math/vectorspace_dimensionality/index.html index 0b7fa252b..83bf81b90 100644 --- a/docs/_site/site/api_modules/mlxtend.math/vectorspace_dimensionality/index.html +++ b/docs/_site/site/api_modules/mlxtend.math/vectorspace_dimensionality/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -940,7 +948,7 @@

    vectorspace_dimensionality

    diff --git a/docs/_site/site/api_modules/mlxtend.math/vectorspace_orthonormalization/index.html b/docs/_site/site/api_modules/mlxtend.math/vectorspace_orthonormalization/index.html index c325cf8b1..1259b562a 100644 --- a/docs/_site/site/api_modules/mlxtend.math/vectorspace_orthonormalization/index.html +++ b/docs/_site/site/api_modules/mlxtend.math/vectorspace_orthonormalization/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -947,7 +955,7 @@

    vectorspace_orthonormalization

    diff --git a/docs/_site/site/api_modules/mlxtend.plotting/category_scatter/index.html b/docs/_site/site/api_modules/mlxtend.plotting/category_scatter/index.html index 829aaef55..3921a95cc 100644 --- a/docs/_site/site/api_modules/mlxtend.plotting/category_scatter/index.html +++ b/docs/_site/site/api_modules/mlxtend.plotting/category_scatter/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -970,7 +978,7 @@

    category_scatter

    diff --git a/docs/_site/site/api_modules/mlxtend.plotting/checkerboard_plot/index.html b/docs/_site/site/api_modules/mlxtend.plotting/checkerboard_plot/index.html index 0dcb64b34..461962db2 100644 --- a/docs/_site/site/api_modules/mlxtend.plotting/checkerboard_plot/index.html +++ b/docs/_site/site/api_modules/mlxtend.plotting/checkerboard_plot/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -973,7 +981,7 @@

    checkerboard_plot

    diff --git a/docs/_site/site/api_modules/mlxtend.plotting/ecdf/index.html b/docs/_site/site/api_modules/mlxtend.plotting/ecdf/index.html index e50254d0c..0f3c22536 100644 --- a/docs/_site/site/api_modules/mlxtend.plotting/ecdf/index.html +++ b/docs/_site/site/api_modules/mlxtend.plotting/ecdf/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -986,7 +994,7 @@

    ecdf

    diff --git a/docs/_site/site/api_modules/mlxtend.plotting/enrichment_plot/index.html b/docs/_site/site/api_modules/mlxtend.plotting/enrichment_plot/index.html index a307ce985..4141b40a2 100644 --- a/docs/_site/site/api_modules/mlxtend.plotting/enrichment_plot/index.html +++ b/docs/_site/site/api_modules/mlxtend.plotting/enrichment_plot/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -994,7 +1002,7 @@

    enrichment_plot

    diff --git a/docs/_site/site/api_modules/mlxtend.plotting/plot_confusion_matrix/index.html b/docs/_site/site/api_modules/mlxtend.plotting/plot_confusion_matrix/index.html index 08ab92450..b01a84c93 100644 --- a/docs/_site/site/api_modules/mlxtend.plotting/plot_confusion_matrix/index.html +++ b/docs/_site/site/api_modules/mlxtend.plotting/plot_confusion_matrix/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -975,7 +983,7 @@

    plot_confusion_matrix

    diff --git a/docs/_site/site/api_modules/mlxtend.plotting/plot_decision_regions/index.html b/docs/_site/site/api_modules/mlxtend.plotting/plot_decision_regions/index.html index 3b872f2ba..5d7be9824 100644 --- a/docs/_site/site/api_modules/mlxtend.plotting/plot_decision_regions/index.html +++ b/docs/_site/site/api_modules/mlxtend.plotting/plot_decision_regions/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -1017,7 +1025,7 @@

    plot_decision_regions

    diff --git a/docs/_site/site/api_modules/mlxtend.plotting/plot_learning_curves/index.html b/docs/_site/site/api_modules/mlxtend.plotting/plot_learning_curves/index.html index a6debdd0a..4c2bdb590 100644 --- a/docs/_site/site/api_modules/mlxtend.plotting/plot_learning_curves/index.html +++ b/docs/_site/site/api_modules/mlxtend.plotting/plot_learning_curves/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -990,7 +998,7 @@

    plot_learning_curves

    diff --git a/docs/_site/site/api_modules/mlxtend.plotting/plot_linear_regression/index.html b/docs/_site/site/api_modules/mlxtend.plotting/plot_linear_regression/index.html index 772d3425a..50026a8a5 100644 --- a/docs/_site/site/api_modules/mlxtend.plotting/plot_linear_regression/index.html +++ b/docs/_site/site/api_modules/mlxtend.plotting/plot_linear_regression/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -965,7 +973,7 @@

    plot_linear_regression

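A minimal sketch of the repeated nav hunk, reconstructed from the rendered bullet lists above. The theme's exact template markup is not preserved in this extraction, so the list structure, class names, and href targets here are assumptions, not taken from the patch:

+        <li><a href="../../../api_subpackages/mlxtend.image/">Mlxtend.image</a></li>
         <li><a href="../../../api_subpackages/mlxtend.plotting/">Mlxtend.plotting</a></li>

and, in each page's footer (link target assumed):

-        <a href="https://github.com/rasbt/mlxtend">Edit on GitHub</a>
+        <a href="https://github.com/rasbt/mlxtend">GitHub</a>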
diff --git a/docs/_site/site/api_modules/mlxtend.plotting/plot_sequential_feature_selection/index.html b/docs/_site/site/api_modules/mlxtend.plotting/plot_sequential_feature_selection/index.html
index 9a8862efa..a319ef635 100644
[same two nav-template hunks as above, plus the footer copyright notice is updated:]
@@ -969,7 +977,7 @@
-    Copyright © 2014-2018 Sebastian Raschka
+    Copyright © 2014-2019 Sebastian Raschka
     Documentation built with MkDocs.
[The same two nav-template hunks repeat verbatim across these additional api_modules pages (paths again relative to docs/_site/site/api_modules/); the remaining per-page hunks only renumber in-page anchors such as "Methods" and "Properties":]

mlxtend.plotting/remove_borders/index.html (index 5dd9eac34..5c7fb5659)
mlxtend.plotting/scatterplotmatrix/index.html (index 5aac710e7..d0509c0ee)
mlxtend.plotting/stacked_barplot/index.html (index a3276f8d0..17b6b54dc)
mlxtend.preprocessing/CopyTransformer/index.html (index c31cf3a8d..fce995c41)
mlxtend.preprocessing/DenseTransformer/index.html (index deec62626..4896d601a)
mlxtend.preprocessing/MeanCenterer/index.html (index 8f43108b1..cfe3913c7)
mlxtend.preprocessing/OnehotTransactions/index.html (index 0612de5ee..b7b1d4a52)
mlxtend.preprocessing/TransactionEncoder/index.html (index eeadb146a..66b8d7d98)
mlxtend.preprocessing/minmax_scaling/index.html (index dc10712f7..9a5b55992)
mlxtend.preprocessing/one_hot/index.html (index cbd355714..d6a6b47c3)
mlxtend.preprocessing/shuffle_arrays_unison/index.html (index 271dab19f..453129447)
mlxtend.preprocessing/standardize/index.html (index 6fc348626..31e7f3fb5)
mlxtend.regressor/LinearRegression/index.html (index ebdd985e5..ac276fa82)
mlxtend.regressor/StackingRegressor/index.html (index 78924db36..3b0ccc130)
mlxtend.text/generalize_names/index.html (index 67e02230f..f09421c71)
mlxtend.text/generalize_names_duplcheck/index.html (index 5da23d1fc..ce7aa3567)
mlxtend.text/tokenizer_emoticons/index.html (index 50e8f0c19..09d97cd4f)
mlxtend.text/tokenizer_words_and_emoticons/index.html (index e8ae0469d..5a80371f8)
mlxtend.utils/Counter/index.html (index 5f89aa9c2..576b5acc6)
mlxtend.utils/assert_raises/index.html (index a2e50d0c2..fac9395b5)
mlxtend.utils/check_Xy/index.html (index 6355ca688..b53b48e9f)
mlxtend.utils/format_kwarg_dictionaries/index.html (index 2d2d8d500..f538de265)
[The same nav-template hunks (on these pages: @@ -805,6 +805,12 @@ for the "Mlxtend.image" menu entry and @@ -897,6 +903,8 @@ near the search block for the "Edit on GitHub" → "GitHub" rename) repeat across the generated subpackage-index pages and the contributors page; the larger per-page hunks renumber the pages' content links:]

docs/_site/site/api_subpackages/mlxtend._base/index.html (index addebe6b8..497df9404)
docs/_site/site/api_subpackages/mlxtend.classifier/index.html (index f2759e745..dc7713929)
docs/_site/site/api_subpackages/mlxtend.cluster/index.html (index 71a37801c..aa05d6241)
docs/_site/site/api_subpackages/mlxtend.evaluate/index.html (index e703d7c6e..924000bb1)
docs/_site/site/api_subpackages/mlxtend.feature_extraction/index.html (index 665ed1cfe..b0c69ebd0)
docs/_site/site/api_subpackages/mlxtend.file_io/index.html (index faabfa32c..5e3f832f4)
docs/_site/site/api_subpackages/mlxtend.frequent_patterns/index.html (index dc3baa4cc..09b1b5258)
docs/_site/site/api_subpackages/mlxtend.image/index.html (index 88766bedc..593601503)
docs/_site/site/api_subpackages/mlxtend.plotting/index.html (index 4bee1939c..84c18d621)
docs/_site/site/api_subpackages/mlxtend.preprocessing/index.html (index da4d40974..f445add5d)
docs/_site/site/api_subpackages/mlxtend.text/index.html (index 4f1f19304..eb5e4780d)
docs/_site/site/api_subpackages/mlxtend.utils/index.html (index 67137c69c..ff401432b)
docs/_site/site/contributors/index.html (index e22d3ed27..eeb173609)
diff --git a/docs/_site/site/css/base.css b/docs/_site/site/css/base.css
index eb8e296fc..923228600 100644
--- a/docs/_site/site/css/base.css
+++ b/docs/_site/site/css/base.css
@@ -5,7 +5,7 @@ body {
 h1[id]:before, h2[id]:before, h3[id]:before, h4[id]:before, h5[id]:before, h6[id]:before {
     content: "";
     display: block;
-    margin-top: -75px;
+    margin-top: 0px;
     height: 75px;
 }
diff --git a/docs/_site/site/css/cinder.css b/docs/_site/site/css/cinder.css
index ce3cb62b2..47fbb9897 100644
--- a/docs/_site/site/css/cinder.css
+++ b/docs/_site/site/css/cinder.css
@@ -80,3 +80,18 @@ pre {
 footer > hr {
     width: 35%;
 }
+
+
+/*
+  modified dropdown menu, Sebastian Raschka 2016
+*/
+
+.dropdown-menu > li > a {
+    display: block;
+    padding: 5px 20px;
+    clear: both;
+    font-weight: normal;
+    line-height: 1.3;
+    color: #333333;
+    white-space: nowrap;
+}
diff --git a/docs/_site/site/discuss/index.html b/docs/_site/site/discuss/index.html
index fda7c1e76..e2b51284f 100644
[same two nav-template hunks as above]
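Two notes on the CSS hunks above. The base.css change drops the negative top margin on the heading pseudo-elements while keeping their 75px height, which appears to leave a 75-pixel spacer above each linkable heading — the usual adjustment so a heading is not hidden under a fixed navbar when a table-of-contents anchor is followed. The new cinder.css rule restyles links inside the theme's dropdown menus; below is a minimal sketch of the Bootstrap-style markup such a rule would target (the href values and menu contents are assumptions for illustration, not taken from the patch):

<ul class="dropdown-menu">
  <!-- each <a> gets block display, 5px/20px padding, and nowrap text -->
  <li><a href="api_subpackages/mlxtend.image/">Mlxtend.image</a></li>
  <li><a href="api_subpackages/mlxtend.plotting/">Mlxtend.plotting</a></li>
</ul>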
diff --git a/docs/_site/site/index.html b/docs/_site/site/index.html
index bfb22d771..da6bd6d5b 100644
[same two nav-template hunks as above; in-page anchors ("Contact") renumbered: @@ -1031,7 +1039,7 @@, @@ -1082,5 +1090,5 @@]
diff --git a/docs/_site/site/installation/index.html b/docs/_site/site/installation/index.html
index c1bf7f3ea..82e23322a 100644
[same two nav-template hunks as above; in-page anchors ("Dev Version") renumbered: @@ -922,13 +930,13 @@, @@ -980,7 +988,7 @@]
diff --git a/docs/_site/site/license/index.html b/docs/_site/site/license/index.html
index 7c9d407ef..b84361436 100644
[same two nav-template hunks as above; the page's in-page TOC entries (new BSD License, Creative Commons Attribution 4.0 International License, You are free to:, Under the following terms:) are renumbered at @@ -923,9 +931,9 @@, and the BSD license text's copyright year is updated:]
@@ -942,7 +950,7 @@
 New BSD License

-Copyright (c) 2014-2018, Sebastian Raschka. All rights reserved.
+Copyright (c) 2014-2019, Sebastian Raschka. All rights reserved.

 Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
@@ -990,7 +998,7 @@

 Under the following terms:
      diff --git a/docs/_site/site/search/search_index.json b/docs/_site/site/search/search_index.json index af1e5d378..407626cff 100644 --- a/docs/_site/site/search/search_index.json +++ b/docs/_site/site/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"Welcome to mlxtend's documentation! Mlxtend (machine learning extensions) is a Python library of useful tools for the day-to-day data science tasks. Links Documentation: http://rasbt.github.io/mlxtend Source code repository: https://github.com/rasbt/mlxtend PyPI: https://pypi.python.org/pypi/mlxtend Questions? Check out the Google Groups mailing list Examples import numpy as np import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import itertools from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import EnsembleVoteClassifier from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions # Initializing Classifiers clf1 = LogisticRegression(random_state=0) clf2 = RandomForestClassifier(random_state=0) clf3 = SVC(random_state=0, probability=True) eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], weights=[2, 1, 1], voting='soft') # Loading some example data X, y = iris_data() X = X[:,[0, 2]] # Plotting Decision Regions gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10, 8)) labels = ['Logistic Regression', 'Random Forest', 'RBF kernel SVM', 'Ensemble'] for clf, lab, grd in zip([clf1, clf2, clf3, eclf], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show() If you use mlxtend as part of your workflow in a scientific publication, please consider citing the mlxtend repository with the following DOI: @article{raschkas_2018_mlxtend, author = {Sebastian Raschka}, title = {MLxtend: Providing machine learning and data science utilities and extensions to Python\u2019s scientific computing stack}, journal = {The Journal of Open Source Software}, volume = {3}, number = {24}, month = apr, year = 2018, publisher = {The Open Journal}, doi = {10.21105/joss.00638}, url = {http://joss.theoj.org/papers/10.21105/joss.00638} } License This project is released under a permissive new BSD open source license ( LICENSE-BSD3.txt ) and commercially usable. There is no warranty; not even for merchantability or fitness for a particular purpose. In addition, you may use, copy, modify and redistribute all artistic creative works (figures and images) included in this distribution under the directory according to the terms and conditions of the Creative Commons Attribution 4.0 International License. See the file LICENSE-CC-BY.txt for details. (Computer-generated graphics such as the plots produced by matplotlib fall under the BSD license mentioned above). Contact I received a lot of feedback and questions about mlxtend recently, and I thought that it would be worthwhile to set up a public communication channel. Before you write an email with a question about mlxtend, please consider posting it here since it can also be useful to others! Please join the Google Groups Mailing List ! If Google Groups is not for you, please feel free to write me an email or consider filing an issue on GitHub's issue tracker for new feature requests or bug reports. 
In addition, I setup a Gitter channel for live discussions.","title":"Home"},{"location":"#welcome-to-mlxtends-documentation","text":"Mlxtend (machine learning extensions) is a Python library of useful tools for the day-to-day data science tasks.","title":"Welcome to mlxtend's documentation!"},{"location":"#links","text":"Documentation: http://rasbt.github.io/mlxtend Source code repository: https://github.com/rasbt/mlxtend PyPI: https://pypi.python.org/pypi/mlxtend Questions? Check out the Google Groups mailing list","title":"Links"},{"location":"#examples","text":"import numpy as np import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import itertools from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import EnsembleVoteClassifier from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions # Initializing Classifiers clf1 = LogisticRegression(random_state=0) clf2 = RandomForestClassifier(random_state=0) clf3 = SVC(random_state=0, probability=True) eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], weights=[2, 1, 1], voting='soft') # Loading some example data X, y = iris_data() X = X[:,[0, 2]] # Plotting Decision Regions gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10, 8)) labels = ['Logistic Regression', 'Random Forest', 'RBF kernel SVM', 'Ensemble'] for clf, lab, grd in zip([clf1, clf2, clf3, eclf], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show() If you use mlxtend as part of your workflow in a scientific publication, please consider citing the mlxtend repository with the following DOI: @article{raschkas_2018_mlxtend, author = {Sebastian Raschka}, title = {MLxtend: Providing machine learning and data science utilities and extensions to Python\u2019s scientific computing stack}, journal = {The Journal of Open Source Software}, volume = {3}, number = {24}, month = apr, year = 2018, publisher = {The Open Journal}, doi = {10.21105/joss.00638}, url = {http://joss.theoj.org/papers/10.21105/joss.00638} }","title":"Examples"},{"location":"#license","text":"This project is released under a permissive new BSD open source license ( LICENSE-BSD3.txt ) and commercially usable. There is no warranty; not even for merchantability or fitness for a particular purpose. In addition, you may use, copy, modify and redistribute all artistic creative works (figures and images) included in this distribution under the directory according to the terms and conditions of the Creative Commons Attribution 4.0 International License. See the file LICENSE-CC-BY.txt for details. (Computer-generated graphics such as the plots produced by matplotlib fall under the BSD license mentioned above).","title":"License"},{"location":"#contact","text":"I received a lot of feedback and questions about mlxtend recently, and I thought that it would be worthwhile to set up a public communication channel. Before you write an email with a question about mlxtend, please consider posting it here since it can also be useful to others! Please join the Google Groups Mailing List ! If Google Groups is not for you, please feel free to write me an email or consider filing an issue on GitHub's issue tracker for new feature requests or bug reports. 
In addition, I setup a Gitter channel for live discussions.","title":"Contact"},{"location":"CHANGELOG/","text":"Release Notes The CHANGELOG for the current development version is available at https://github.com/rasbt/mlxtend/blob/master/docs/sources/CHANGELOG.md . Version 0.14.0 (11-09-2018) Downloads Source code (zip) Source code (tar.gz) New Features Added a scatterplotmatrix function to the plotting module. ( #437 ) Added sample_weight option to StackingRegressor , StackingClassifier , StackingCVRegressor , StackingCVClassifier , EnsembleVoteClassifier . ( #438 ) Added a RandomHoldoutSplit class to perform a random train/valid split without rotation in SequentialFeatureSelector , scikit-learn GridSearchCV etc. ( #442 ) Added a PredefinedHoldoutSplit class to perform a train/valid split, based on user-specified indices, without rotation in SequentialFeatureSelector , scikit-learn GridSearchCV etc. ( #443 ) Created a new mlxtend.image submodule for working on image processing-related tasks. ( #457 ) Added a new convenience function extract_face_landmarks based on dlib to mlxtend.image . ( #458 ) Added a method='oob' option to the mlxtend.evaluate.bootstrap_point632_score method to compute the classic out-of-bag bootstrap estimate ( #459 ) Added a method='.632+' option to the mlxtend.evaluate.bootstrap_point632_score method to compute the .632+ bootstrap estimate that addresses the optimism bias of the .632 bootstrap ( #459 ) Added a new mlxtend.evaluate.ftest function to perform an F-test for comparing the accuracies of two or more classification models. ( #460 ) Added a new mlxtend.evaluate.combined_ftest_5x2cv function to perform an combined 5x2cv F-Test for comparing the performance of two models. ( #461 ) Added a new mlxtend.evaluate.difference_proportions test for comparing two proportions (e.g., classifier accuracies) ( #462 ) Changes Addressed deprecations warnings in NumPy 0.15. ( #425 ) Because of complications in PR ( #459 ), Python 2.7 was now dropped; since official support for Python 2.7 by the Python Software Foundation is ending in approx. 12 months anyways, this re-focussing will hopefully free up some developer time with regard to not having to worry about backward compatibility Bug Fixes Fixed an issue with a missing import in mlxtend.plotting.plot_confusion_matrix . ( #428 ) Version 0.13.0 (2018-07-20) Downloads Source code (zip) Source code (tar.gz) New Features A meaningful error message is now raised when a cross-validation generator is used with SequentialFeatureSelector . ( #377 ) The SequentialFeatureSelector now accepts custom feature names via the fit method for more interpretable feature subset reports. ( #379 ) The SequentialFeatureSelector is now also compatible with Pandas DataFrames and uses DataFrame column-names for more interpretable feature subset reports. ( #379 ) ColumnSelector now works with Pandas DataFrames columns. ( #378 by Manuel Garrido ) The ExhaustiveFeatureSelector estimator in mlxtend.feature_selection now is safely stoppable mid-process by control+c. ( #380 ) Two new functions, vectorspace_orthonormalization and vectorspace_dimensionality were added to mlxtend.math to use the Gram-Schmidt process to convert a set of linearly independent vectors into a set of orthonormal basis vectors, and to compute the dimensionality of a vectorspace, respectively. ( #382 ) mlxtend.frequent_patterns.apriori now supports pandas SparseDataFrame s to generate frequent itemsets. 
( #404 via Daniel Morales ) The plot_confusion_matrix function now has the ability to show normalized confusion matrix coefficients in addition to or instead of absolute confusion matrix coefficients with or without a colorbar. The text display method has been changed so that the full range of the colormap is used. The default size is also now set based on the number of classes. Added support for merging the meta features with the original input features in StackingRegressor (via use_features_in_secondary ) like it is already supported in the other Stacking classes. ( #418 ) Added a support_only to the association_rules function, which allow constructing association rules (based on the support metric only) for cropped input DataFrames that don't contain a complete set of antecedent and consequent support values. ( #421 ) Changes Itemsets generated with apriori are now frozenset s ( #393 by William Laney and #394 ) Now raises an error if a input DataFrame to apriori contains non 0, 1, True, False values. #419 ) Bug Fixes Allow mlxtend estimators to be cloned via scikit-learn's clone function. ( #374 ) Fixes bug to allow the correct use of refit=False in StackingRegressor and StackingCVRegressor ( #384 and ( #385 ) by selay01 ) Allow StackingClassifier to work with sparse matrices when use_features_in_secondary=True ( #408 by Floris Hoogenbook ) Allow StackingCVRegressor to work with sparse matrices when use_features_in_secondary=True ( #416 ) Allow StackingCVClassifier to work with sparse matrices when use_features_in_secondary=True ( #417 ) Version 0.12.0 (2018-21-04) Downloads Source code (zip) Source code (tar.gz) New Features A new feature_importance_permuation function to compute the feature importance in classifiers and regressors via the permutation importance method ( #358 ) The fit method of the ExhaustiveFeatureSelector now optionally accepts **fit_params for the estimator that is used for the feature selection. ( #354 by Zach Griffith) The fit method of the SequentialFeatureSelector now optionally accepts **fit_params for the estimator that is used for the feature selection. ( #350 by Zach Griffith) Changes Replaced plot_decision_regions colors by a colorblind-friendly palette and adds contour lines for decision regions. ( #348 ) All stacking estimators now raise NonFittedErrors if any method for inference is called prior to fitting the estimator. ( #353 ) Renamed the refit parameter of both the StackingClassifier and StackingCVClassifier to use_clones to be more explicit and less misleading. ( #368 ) Bug Fixes Various changes in the documentation and documentation tools to fix formatting issues ( #363 ) Fixes a bug where the StackingCVClassifier 's meta features were not stored in the original order when shuffle=True ( #370 ) Many documentation improvements, including links to the User Guides in the API docs ( #371 ) Version 0.11.0 (2018-03-14) Downloads Source code (zip) Source code (tar.gz) New Features New function implementing the resampled paired t-test procedure ( paired_ttest_resampled ) to compare the performance of two models. ( #323 ) New function implementing the k-fold paired t-test procedure ( paired_ttest_kfold_cv ) to compare the performance of two models (also called k-hold-out paired t-test). ( #324 ) New function implementing the 5x2cv paired t-test procedure ( paired_ttest_5x2cv ) proposed by Dieterrich (1998) to compare the performance of two models. 
( #325 ) A refit parameter was added to stacking classes (similar to the refit parameter in the EnsembleVoteClassifier ), to support classifiers and regressors that follow the scikit-learn API but are not compatible with scikit-learn's clone function. ( #322 ) The ColumnSelector now has a drop_axis argument to use it in pipelines with CountVectorizers . ( #333 ) Changes Raises an informative error message if predict or predict_meta_features is called prior to calling the fit method in StackingRegressor and StackingCVRegressor . ( #315 ) The plot_decision_regions function now automatically determines the optimal grid resolution based on the feature dimensions and supports anti-aliasing. The old res parameter has been deprecated. ( #309 by Guillaume Poirier-Morency ) Apriori code is faster due to optimizations in the one-hot transformation and the number of candidates generated by the apriori algorithm. ( #327 by Jakub Smid ) The OnehotTransactions class (which is often used in combination with the apriori function for association rule mining) is now more memory efficient as it uses boolean arrays instead of integer arrays. In addition, the OnehotTransactions class can now be provided with a sparse argument to generate sparse representations of the onehot matrix to further improve memory efficiency. ( #328 by Jakub Smid ) The OnehotTransactions class has been deprecated and replaced by the TransactionEncoder . ( #332 ) The plot_decision_regions function now has three new parameters, scatter_kwargs , contourf_kwargs , and scatter_highlight_kwargs , that can be used to modify the plotting style. ( #342 by James Bourbeau ) Bug Fixes Fixed an issue when class labels were provided to the EnsembleVoteClassifier when refit was set to False . ( #322 ) Allow arrays with 16-bit and 32-bit precision in plot_decision_regions function. ( #337 ) Fixed a bug that raised an indexing error if the number of items was <= 1 when computing association rules using the conviction metric. ( #340 ) Version 0.10.0 (2017-12-22) Downloads Source code (zip) Source code (tar.gz) New Features New store_train_meta_features parameter for fit in StackingCVRegressor . If True, train meta-features are stored in self.train_meta_features_ . New pred_meta_features method for StackingCVRegressor ; test meta-features can be obtained via this method. ( #294 via takashioya ) The new store_train_meta_features attribute and pred_meta_features method for the StackingCVRegressor were also added to the StackingRegressor , StackingClassifier , and StackingCVClassifier ( #299 & #300 ) New function ( evaluate.mcnemar_tables ) for creating multiple 2x2 contingency tables from model prediction arrays that can be used in multiple McNemar (post-hoc) tests or Cochran's Q or F tests, etc. ( #307 ) New function ( evaluate.cochrans_q ) for performing Cochran's Q test to compare the accuracy of multiple classifiers. ( #310 ) Changes Added requirements.txt to setup.py . ( #304 via Colin Carroll ) Bug Fixes Improved numerical stability for p-values computed via the exact McNemar test ( #306 ) nose is no longer required to use the library ( #302 ) Version 0.9.1 (2017-11-19) Downloads Source code (zip) Source code (tar.gz) New Features Added mlxtend.evaluate.bootstrap_point632_score to evaluate the performance of estimators using the .632 bootstrap. ( #283 ) New max_len parameter for the frequent itemset generation via the apriori function to allow for early stopping. ( #270 ) Changes All feature index tuples in SequentialFeatureSelector are now in sorted order.
( #262 ) The SequentialFeatureSelector now runs the continuation of the floating inclusion/exclusion as described in Novovicova & Kittler (1994). Note that this didn't cause any difference in performance on any of the test scenarios but could lead to better performance in certain edge cases. ( #262 ) utils.Counter now accepts a name variable to help distinguish between multiple counters; time precision can be set with the 'precision' kwarg, and the new attribute end_time holds the time the last iteration completed. ( #278 via Mathew Savage ) Bug Fixes Fixed a deprecation error that occurred with the McNemar test when using SciPy 1.0. ( #283 ) Version 0.9.0 (2017-10-21) Downloads Source code (zip) Source code (tar.gz) New Features Added evaluate.permutation_test , a permutation test for hypothesis testing (or A/B testing) to test if two samples come from the same distribution; in other words, a procedure to test the null hypothesis that two groups are not significantly different (e.g., a treatment and a control group). ( #250 ) Added 'leverage' and 'conviction' as evaluation metrics to the frequent_patterns.association_rules function. ( #246 & #247 ) Added a loadings_ attribute to PrincipalComponentAnalysis to compute the factor loadings of the features on the principal components. ( #251 ) Allow grid search over classifiers/regressors in ensemble and stacking estimators. ( #259 ) New make_multiplexer_dataset function that creates a dataset generated by an n-bit Boolean multiplexer for evaluating supervised learning algorithms. ( #263 ) Added a new BootstrapOutOfBag class, an implementation of the out-of-bag bootstrap to evaluate supervised learning algorithms. ( #265 ) The parameters for StackingClassifier , StackingCVClassifier , StackingRegressor , StackingCVRegressor , and EnsembleVoteClassifier can now be tuned using scikit-learn's GridSearchCV ( #254 via James Bourbeau ) Changes The 'support' column returned by frequent_patterns.association_rules was changed to compute the support of \"antecedant union consequent\", and new 'antecedant support' and 'consequent support' columns were added to avoid ambiguity. ( #245 ) Allow the OnehotTransactions to be cloned via scikit-learn's clone function, which is required by, e.g., scikit-learn's FeatureUnion or GridSearchCV (via Iaroslav Shcherbatyi ). ( #249 ) Bug Fixes Fix issues with the self._init_time parameter in _IterativeModel subclasses. ( #256 ) Fix an imprecision bug that occurred in plot_ecdf when run on Python 2.7. ( #264 ) The vectors from SVD in PrincipalComponentAnalysis are now scaled so that solver='eigen' and solver='svd' store eigenvalues that have the same magnitudes. ( #251 ) Version 0.8.0 (2017-09-09) Downloads Source code (zip) Source code (tar.gz) New Features Added mlxtend.evaluate.bootstrap , which implements the ordinary nonparametric bootstrap to bootstrap a single statistic (for example, the mean, median, R^2 of a regression fit, and so forth) #232 The SequentialFeatureSelector 's k_features parameter now accepts a string argument \"best\" or \"parsimonious\" for more \"automated\" feature selection. For instance, if \"best\" is provided, the feature selector will return the feature subset with the best cross-validation performance. If \"parsimonious\" is provided as an argument, the smallest feature subset that is within one standard error of the cross-validation performance will be selected.
#238 Changes SequentialFeatureSelector now uses np.nanmean instead of the regular mean to support scorers that may return np.nan #211 (via mrkaiser ) The skip_if_stuck parameter was removed from SequentialFeatureSelector in favor of a more efficient implementation that compares the conditional inclusion/exclusion results (in the floating versions) to the performances of previously sampled feature sets that were cached #237 ExhaustiveFeatureSelector was modified to consume substantially less memory #195 (via Adam Erickson ) Bug Fixes Fixed a bug where the SequentialFeatureSelector selected a feature subset larger than the max-value specified via the k_features tuple #213 Version 0.7.0 (2017-06-22) Downloads Source code (zip) Source code (tar.gz) New Features New mlxtend.plotting.ecdf function for plotting empirical cumulative distribution functions ( #196 ). New StackingCVRegressor for stacking regressors with out-of-fold predictions to prevent overfitting ( #201 via Eike Dehling ). Changes The TensorFlow estimators have been removed from mlxtend, since TensorFlow now provides very convenient ways to build estimators, which renders those implementations obsolete. plot_decision_regions now supports plotting decision regions for more than 2 training features ( #189 , via James Bourbeau ). Parallel execution in mlxtend.feature_selection.SequentialFeatureSelector and mlxtend.feature_selection.ExhaustiveFeatureSelector is now performed over different feature subsets instead of the different cross-validation folds to better utilize machines with multiple processors if the number of features is large ( #193 , via @whalebot-helmsman ). Raise meaningful error messages if pandas DataFrames or Python lists of lists are fed into the StackingCVClassifier as fit arguments ( #198 ). The n_folds parameter of the StackingCVClassifier was changed to cv and can now accept any kind of cross-validation technique that is available from scikit-learn. For example, StackingCVClassifier(..., cv=StratifiedKFold(n_splits=3)) or StackingCVClassifier(..., cv=GroupKFold(n_splits=3)) ( #203 , via Konstantinos Paliouras ). Bug Fixes SequentialFeatureSelector now correctly accepts a None argument for the scoring parameter to infer the default scoring metric from scikit-learn classifiers and regressors ( #171 ). The plot_decision_regions function now supports pre-existing axes objects generated via matplotlib's plt.subplots . ( #184 , see example ) Made math.num_combinations and math.num_permutations numerically stable for large numbers of combinations and permutations ( #200 ). Version 0.6.0 (2017-03-18) Downloads Source code (zip) Source code (tar.gz) New Features An association_rules function is implemented that allows generating rules based on a list of frequent itemsets (via Joshua Goerner ; see the sketch below). Changes Adds a black edgecolor to plots via plotting.plot_decision_regions to make markers more distinguishable from the background in matplotlib>=2.0 . The association submodule was renamed to frequent_patterns . Bug Fixes The DataFrame index of apriori results is now unique and ordered. Fixed typos in the autompg and wine datasets (via James Bourbeau ). Version 0.5.1 (2017-02-14) Downloads Source code (zip) Source code (tar.gz) New Features The EnsembleVoteClassifier has a new refit attribute that prevents refitting classifiers if refit=False to save computational time. Added a new lift_score function in evaluate to compute lift score (via Batuhan Bardak ).
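To illustrate the apriori / association_rules workflow introduced in the 0.6.0 notes above, here is a minimal sketch, assuming a recent mlxtend version in which TransactionEncoder has replaced OnehotTransactions (the toy transactions are made up for illustration):

```python
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules

# toy transaction data (made up for illustration)
transactions = [['milk', 'bread'], ['milk', 'eggs'], ['milk', 'bread', 'eggs']]

# one-hot encode the transactions into a boolean DataFrame
te = TransactionEncoder()
df = pd.DataFrame(te.fit(transactions).transform(transactions),
                  columns=te.columns_)

# extract frequent itemsets, then generate rules from them
frequent_itemsets = apriori(df, min_support=0.5, use_colnames=True)
rules = association_rules(frequent_itemsets, metric="confidence",
                          min_threshold=0.7)
print(rules.head())
```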
StackingClassifier and StackingRegressor support multivariate targets if the underlying models do (via kernc ). StackingClassifier has a new use_features_in_secondary attribute like StackingCVClassifier . Changes Changed the default verbosity level in SequentialFeatureSelector to 0 The EnsembleVoteClassifier now raises a NotFittedError if the estimator wasn't fit before calling predict . (via Anton Loss ) Added new TensorFlow variable initialization syntax to guarantee compatibility with TensorFlow 1.0 Bug Fixes Fixed a wrong default value for k_features in SequentialFeatureSelector Cast selected feature subsets in the SequentialFeatureSelector as sets to prevent the iterator from getting stuck if the k_idx are different permutations of the same combination (via Zac Wellmer ). Fixed an issue with learning curves that caused the performance metrics to be reversed (via ipashchenko ) Fixed a bug that could occur in the SequentialFeatureSelector if there are similarly well-performing subsets in the floating variants (via Zac Wellmer ). Version 0.5.0 (2016-11-09) Downloads Source code (zip) Source code (tar.gz) New Features New ExhaustiveFeatureSelector estimator in mlxtend.feature_selection for evaluating all feature combinations in a specified range The StackingClassifier has a new parameter average_probas that is set to True by default to maintain the current behavior. A deprecation warning was added, though, and it will default to False in future releases (0.6.0); average_probas=False will result in stacking of the level-1 predicted probabilities rather than averaging these. New StackingCVClassifier estimator in 'mlxtend.classifier' for implementing a stacking ensemble that uses cross-validation techniques for training the meta-estimator to avoid overfitting ( Reiichiro Nakano ) New OnehotTransactions encoder class added to the preprocessing submodule for transforming transaction data into a one-hot encoded array The SequentialFeatureSelector estimator in mlxtend.feature_selection is now safely stoppable mid-process by control+c, and print_progress was deprecated in favor of a more tunable verbose parameter ( Will McGinnis ) New apriori function in association to extract frequent itemsets from transaction data for association rule mining New checkerboard_plot function in plotting to plot checkerboard tables / heat maps New mcnemar_table and mcnemar functions in evaluate to compute 2x2 contingency tables and McNemar's test (see the sketch below) Changes All plotting functions have been moved to mlxtend.plotting for compatibility reasons with continuous integration services and to make the installation of matplotlib optional for users of mlxtend 's core functionality Added a compatibility layer for scikit-learn 0.18 using the new model_selection module while maintaining backwards compatibility to scikit-learn 0.17.
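A minimal sketch of the mcnemar_table / mcnemar functions added in 0.5.0 above, using made-up prediction arrays (a sketch, not an exhaustive reference for the API):

```python
import numpy as np
from mlxtend.evaluate import mcnemar_table, mcnemar

# made-up ground truth and predictions from two classifiers
y_true   = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
y_model1 = np.array([0, 1, 0, 0, 1, 1, 1, 1, 0, 1])
y_model2 = np.array([0, 0, 1, 1, 1, 0, 1, 1, 0, 1])

# 2x2 contingency table of the two models' correct/wrong predictions
tb = mcnemar_table(y_target=y_true, y_model1=y_model1, y_model2=y_model2)

# McNemar's test with continuity correction
chi2, p = mcnemar(ary=tb, corrected=True)
print(tb, chi2, p)
```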
Bug Fixes mlxtend.plotting.plot_decision_regions now draws decision regions correctly if more than 4 class labels are present Raise AttributeError in plot_decision_regions when the X_highlight argument is a 1D array ( chkoar ) Version 0.4.2 (2016-08-24) Downloads Source code (zip) Source code (tar.gz) PDF documentation New Features Added preprocessing.CopyTransformer , a mock class that returns copies of input arrays via transform and fit_transform Changes Added AppVeyor to CI to ensure MS Windows compatibility Datasets are now saved as compressed .txt or .csv files rather than being imported as Python objects feature_selection.SequentialFeatureSelector now supports the selection of k_features using a tuple to specify a \"min-max\" k_features range Added an \"SVD solver\" option to the PrincipalComponentAnalysis Raise an AttributeError with a \"not fitted\" message in SequentialFeatureSelector if transform or get_metric_dict are called prior to fit Use small, positive bias units in TfMultiLayerPerceptron 's hidden layer(s) if the activations are ReLUs in order to avoid dead neurons Added an optional clone_estimator parameter to the SequentialFeatureSelector that defaults to True , avoiding the modification of the original estimator objects More rigorous type and shape checks in the evaluate.plot_decision_regions function DenseTransformer now doesn't raise an error if the input array is not sparse API clean-up using scikit-learn's BaseEstimator as the parent class for feature_selection.ColumnSelector Bug Fixes Fixed a problem when a tuple-range was provided as an argument to the SequentialFeatureSelector 's k_features parameter and the scoring metric was more negative than -1 (e.g., as in scikit-learn's MSE scoring function) (via wahutch ) Fixed an AttributeError issue when verbose > 1 in StackingClassifier Fixed a bug in classifier.SoftmaxRegression where the mean values of the offsets were used to update the bias units rather than their sum Fixed a rare bug in the MLP _layer_mapping functions that caused the random number generation seeds to be swapped when initializing weights and biases Version 0.4.1 (2016-05-01) Downloads Source code (zip) Source code (tar.gz) PDF documentation New Features New TensorFlow estimator for Linear Regression ( tf_regressor.TfLinearRegression ) New k-means clustering estimator ( cluster.Kmeans ) New TensorFlow k-means clustering estimator ( tf_cluster.Kmeans ) Changes Due to refactoring of the estimator classes, the init_weights parameter of the fit methods was globally renamed to init_params Overall performance improvements of estimators due to code clean-up and refactoring Added several additional checks for correct array types and more meaningful exception messages Added optional dropout to the tf_classifier.TfMultiLayerPerceptron classifier for regularization Added an optional decay parameter to the tf_classifier.TfMultiLayerPerceptron classifier for adaptive learning via an exponential decay of the learning rate eta Replaced the old NeuralNetMLP by the more streamlined MultiLayerPerceptron ( classifier.MultiLayerPerceptron ); now also with softmax in the output layer and categorical cross-entropy loss.
Unified the init_params parameter for fit functions to continue training where the algorithm left off (if supported) Version 0.4.0 (2016-04-09) New Features New TfSoftmaxRegression classifier using TensorFlow ( tf_classifier.TfSoftmaxRegression ) New SoftmaxRegression classifier ( classifier.SoftmaxRegression ) New TfMultiLayerPerceptron classifier using TensorFlow ( tf_classifier.TfMultiLayerPerceptron ) New StackingRegressor ( regressor.StackingRegressor ) New StackingClassifier ( classifier.StackingClassifier ) New function for one-hot encoding of class labels ( preprocessing.one_hot ) Added GridSearch support to the SequentialFeatureSelector ( feature_selection.SequentialFeatureSelector ) evaluate.plot_decision_regions improvements: Function now handles y-class labels correctly if the array is of type float Correct handling of input arguments markers and colors Accept an existing Axes via the ax argument New print_progress parameter for all generalized models and multi-layer neural networks for printing the time elapsed, ETA, and the current cost of the current epoch Minibatch learning for classifier.LogisticRegression , classifier.Adaline , and regressor.LinearRegression plus a streamlined API New Principal Component Analysis class via mlxtend.feature_extraction.PrincipalComponentAnalysis New RBF Kernel Principal Component Analysis class via mlxtend.feature_extraction.RBFKernelPCA New Linear Discriminant Analysis class via mlxtend.feature_extraction.LinearDiscriminantAnalysis Changes The column parameter in mlxtend.preprocessing.standardize now defaults to None to standardize all columns more conveniently Version 0.3.0 (2016-01-31) Downloads Source code (zip) Source code (tar.gz) New Features Added a progress bar tracker to classifier.NeuralNetMLP Added a function to score predicted vs. target class labels ( evaluate.scoring ) Added confusion matrix functions to create ( evaluate.confusion_matrix ) and plot ( evaluate.plot_confusion_matrix ) confusion matrices New style parameter and improved axis scaling in mlxtend.evaluate.plot_learning_curves Added loadlocal_mnist to mlxtend.data for streaming MNIST from local byte files into NumPy arrays New NeuralNetMLP parameters: random_weights , shuffle_init , shuffle_epoch New SFS features such as the generation of pandas DataFrame results tables and plotting functions (with confidence intervals, standard deviation, and standard error bars) Added support for regression estimators in SFS Added the Boston housing dataset New shuffle parameter for classifier.NeuralNetMLP Changes The mlxtend.preprocessing.standardize function now optionally returns the parameters, which are estimated from the array, for re-use. A further improvement makes the standardize function smarter in order to avoid zero-division errors Cosmetic improvements to the evaluate.plot_decision_regions function such as hiding plot axes Renaming of classifier.EnsembleClassifier to classifier.EnsembleVoteClassifier Improved random weight initialization in Perceptron , Adaline , LinearRegression , and LogisticRegression Changed the learning parameter of mlxtend.classifier.Adaline to solver and added \"normal equation\" as a closed-form solution solver Hide y-axis labels in mlxtend.evaluate.plot_decision_regions in 1-dimensional evaluations Sequential Feature Selection algorithms were unified into a single SequentialFeatureSelector class with parameters to enable floating selection and toggle between forward and backward selection (see the sketch below).
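As a usage sketch of the unified selector described in the previous item, assuming the mlxtend.feature_selection API of later releases and scikit-learn toy data:

```python
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)

# forward=False would toggle backward selection; floating=True
# enables the floating (SFFS/SBFS) variants
sfs = SFS(KNeighborsClassifier(n_neighbors=3),
          k_features=3,
          forward=True,
          floating=False,
          scoring='accuracy',
          cv=5)
sfs = sfs.fit(X, y)
print(sfs.k_feature_idx_, sfs.k_score_)
```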
Stratified sampling of MNIST (now 500x random samples from each of the 10 digit categories) Renaming mlxtend.plotting to mlxtend.general_plotting in order to distinguish general plotting functions from specialized utility functions such as evaluate.plot_decision_regions Version 0.2.9 (2015-07-14) Downloads Source code (zip) Source code (tar.gz) New Features Sequential Feature Selection algorithms: SFS, SFFS, SBS, and SFBS Changes Changed the regularization & lambda parameters in LogisticRegression to a single parameter l2_lambda Version 0.2.8 (2015-06-27) API changes: mlxtend.sklearn.EnsembleClassifier -> mlxtend.classifier.EnsembleClassifier mlxtend.sklearn.ColumnSelector -> mlxtend.feature_selection.ColumnSelector mlxtend.sklearn.DenseTransformer -> mlxtend.preprocessing.DenseTransformer mlxtend.pandas.standardizing -> mlxtend.preprocessing.standardizing mlxtend.pandas.minmax_scaling -> mlxtend.preprocessing.minmax_scaling mlxtend.matplotlib -> mlxtend.plotting Added a momentum learning parameter (alpha coefficient) to mlxtend.classifier.NeuralNetMLP . Added an adaptive learning rate (decrease constant) to mlxtend.classifier.NeuralNetMLP . mlxtend.pandas.minmax_scaling became mlxtend.preprocessing.minmax_scaling and also supports NumPy arrays now mlxtend.pandas.standardizing became mlxtend.preprocessing.standardizing and now supports both NumPy arrays and pandas DataFrames; also, a ddof parameter was added to set the degrees of freedom when calculating the standard deviation Version 0.2.7 (2015-06-20) Added a multilayer perceptron (feedforward artificial neural network) classifier as mlxtend.classifier.NeuralNetMLP . Added 5000 labeled training samples from the MNIST handwritten digits dataset to mlxtend.data Version 0.2.6 (2015-05-08) Added ordinary least squares regression using different solvers (gradient descent, stochastic gradient descent, and the closed-form solution (normal equation)) Added an option for random weight initialization to the logistic regression classifier and updated l2 regularization Added the wine dataset to mlxtend.data Added an invert_axes parameter to mlxtend.matplotlib.enrichment_plot to optionally plot the \"Count\" on the x-axis New verbose parameter for mlxtend.sklearn.EnsembleClassifier by Alejandro C. Bahnsen Added mlxtend.pandas.standardizing to standardize columns in a Pandas DataFrame Added parameters linestyles and markers to mlxtend.matplotlib.enrichment_plot mlxtend.regression.lin_regplot automatically adds np.newaxis and works with Python lists Added tokenizers: mlxtend.text.extract_emoticons and mlxtend.text.extract_words_and_emoticons Version 0.2.5 (2015-04-17) Added Sequential Backward Selection (mlxtend.sklearn.SBS) Added an X_highlight parameter to mlxtend.evaluate.plot_decision_regions for highlighting test data points. Added mlxtend.regression.lin_regplot to plot the fitted line from linear regression. Added mlxtend.matplotlib.stacked_barplot to conveniently produce stacked barplots using pandas DataFrames . Added mlxtend.matplotlib.enrichment_plot Version 0.2.4 (2015-03-15) Added scoring to mlxtend.evaluate.learning_curves (by user pfsq) Fixed a setup.py bug caused by the missing README.html file matplotlib.category_scatter for pandas DataFrames and NumPy arrays Version 0.2.3 (2015-03-11) Added Logistic regression The gradient descent and stochastic gradient descent perceptrons were changed to Adaline (Adaptive Linear Neuron) Perceptron and Adaline for {0, 1} classes Added the mlxtend.preprocessing.shuffle_arrays_unison function to shuffle one or more NumPy arrays (see the sketch below).
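A minimal sketch of shuffle_arrays_unison with toy arrays, assuming the current mlxtend.preprocessing API (random_seed keeps the shuffle reproducible):

```python
import numpy as np
from mlxtend.preprocessing import shuffle_arrays_unison

X = np.array([[1, 2], [3, 4], [5, 6]])
y = np.array([1, 2, 3])

# both arrays are shuffled with the same permutation,
# so the rows of X stay aligned with the labels in y
X_shuffled, y_shuffled = shuffle_arrays_unison(arrays=[X, y], random_seed=3)
print(X_shuffled, y_shuffled)
```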
Added shuffle and random seed parameters to the stochastic gradient descent classifier. Added an rstrip parameter to mlxtend.file_io.find_filegroups to allow trimming of base names. Added an ignore_substring parameter to mlxtend.file_io.find_filegroups and find_files . Replaced .rstrip in mlxtend.file_io.find_filegroups with a more robust regex. Gridsearch support for mlxtend.sklearn.EnsembleClassifier Version 0.2.2 (2015-03-01) Improved robustness of the EnsembleClassifier. Extended plot_decision_regions() functionality for plotting 1D decision boundaries. The matplotlib.plot_decision_regions function was reorganized to evaluate.plot_decision_regions . evaluate.plot_learning_curves() function added. Added Rosenblatt, gradient descent, and stochastic gradient descent perceptrons. Version 0.2.1 (2015-01-20) Added mlxtend.pandas.minmax_scaling - a function to rescale pandas DataFrame columns. Slight update to the EnsembleClassifier interface (additional voting parameter) Fixed the EnsembleClassifier to return correct class labels if class labels are not integers from 0 to n. Added a new matplotlib function to plot decision regions of classifiers. Version 0.2.0 (2015-01-13) Improved mlxtend.text.generalize_duplcheck to remove duplicates and prevent an endless looping issue. Added a recursive search parameter to mlxtend.file_io.find_files. Added a check_ext parameter to mlxtend.file_io.find_files to search based on file extensions. Default parameter to ignore invisible files for mlxtend.file_io.find. Added transform and fit_transform to the EnsembleClassifier . Added the mlxtend.file_io.find_filegroups function. Version 0.1.9 (2015-01-10) Implemented a scikit-learn EnsembleClassifier (majority voting rule) class. Version 0.1.8 (2015-01-07) Improvements to mlxtend.text.generalize_names to handle certain Dutch last name prefixes (van, van der, de, etc.). Added the mlxtend.text.generalize_name_duplcheck function to apply the mlxtend.text.generalize_names function to a pandas DataFrame without creating duplicates. Version 0.1.7 (2015-01-07) Added text utilities with a name generalization function. Added file_io utilities. Version 0.1.6 (2015-01-04) Added combinations and permutations estimators. Version 0.1.5 (2014-12-11) Added DenseTransformer for pipelines and grid search. Version 0.1.4 (2014-08-20) The mean_centering function is now a class that creates MeanCenterer objects that can be used to fit data via the fit method, and center data at the column means via the transform and fit_transform methods. Version 0.1.3 (2014-08-19) Added the preprocessing module and the mean_centering function. Version 0.1.2 (2014-08-19) Added matplotlib utilities and the remove_borders function. Version 0.1.1 (2014-08-13) Simplified code for ColumnSelector.","title":"Release Notes"},{"location":"CHANGELOG/#release-notes","text":"The CHANGELOG for the current development version is available at https://github.com/rasbt/mlxtend/blob/master/docs/sources/CHANGELOG.md .","title":"Release Notes"},{"location":"CHANGELOG/#version-0140-11-09-2018","text":"","title":"Version 0.14.0 (11-09-2018)"},{"location":"CHANGELOG/#downloads","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features","text":"Added a scatterplotmatrix function to the plotting module. ( #437 ) Added a sample_weight option to StackingRegressor , StackingClassifier , StackingCVRegressor , StackingCVClassifier , and EnsembleVoteClassifier .
( #438 ) Added a RandomHoldoutSplit class to perform a random train/valid split without rotation in SequentialFeatureSelector , scikit-learn GridSearchCV , etc. ( #442 ) Added a PredefinedHoldoutSplit class to perform a train/valid split, based on user-specified indices, without rotation in SequentialFeatureSelector , scikit-learn GridSearchCV , etc. ( #443 ) Created a new mlxtend.image submodule for working on image processing-related tasks. ( #457 ) Added a new convenience function extract_face_landmarks based on dlib to mlxtend.image . ( #458 ) Added a method='oob' option to the mlxtend.evaluate.bootstrap_point632_score method to compute the classic out-of-bag bootstrap estimate. ( #459 ) Added a method='.632+' option to the mlxtend.evaluate.bootstrap_point632_score method to compute the .632+ bootstrap estimate that addresses the optimism bias of the .632 bootstrap. ( #459 ) Added a new mlxtend.evaluate.ftest function to perform an F-test for comparing the accuracies of two or more classification models. ( #460 ) Added a new mlxtend.evaluate.combined_ftest_5x2cv function to perform a combined 5x2cv F-test for comparing the performance of two models. ( #461 ) Added a new mlxtend.evaluate.difference_proportions test for comparing two proportions (e.g., classifier accuracies). ( #462 )","title":"New Features"},{"location":"CHANGELOG/#changes","text":"Addressed deprecation warnings in NumPy 1.15. ( #425 ) Because of complications in PR ( #459 ), support for Python 2.7 was dropped; since official support for Python 2.7 by the Python Software Foundation is ending in approx. 12 months anyway, this refocusing will hopefully free up some developer time that would otherwise be spent on backward compatibility.","title":"Changes"},{"location":"CHANGELOG/#bug-fixes","text":"Fixed an issue with a missing import in mlxtend.plotting.plot_confusion_matrix . ( #428 )","title":"Bug Fixes"},{"location":"CHANGELOG/#version-0130-2018-07-20","text":"","title":"Version 0.13.0 (2018-07-20)"},{"location":"CHANGELOG/#downloads_1","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_1","text":"A meaningful error message is now raised when a cross-validation generator is used with SequentialFeatureSelector . ( #377 ) The SequentialFeatureSelector now accepts custom feature names via the fit method for more interpretable feature subset reports. ( #379 ) The SequentialFeatureSelector is now also compatible with Pandas DataFrames and uses DataFrame column names for more interpretable feature subset reports. ( #379 ) ColumnSelector now works with Pandas DataFrame columns. ( #378 by Manuel Garrido ) The ExhaustiveFeatureSelector estimator in mlxtend.feature_selection is now safely stoppable mid-process by control+c. ( #380 ) Two new functions, vectorspace_orthonormalization and vectorspace_dimensionality , were added to mlxtend.math to use the Gram-Schmidt process to convert a set of linearly independent vectors into a set of orthonormal basis vectors, and to compute the dimensionality of a vectorspace, respectively. ( #382 ) mlxtend.frequent_patterns.apriori now supports pandas SparseDataFrames to generate frequent itemsets. ( #404 via Daniel Morales ) The plot_confusion_matrix function now has the ability to show normalized confusion matrix coefficients in addition to or instead of absolute confusion matrix coefficients, with or without a colorbar. The text display method has been changed so that the full range of the colormap is used.
The default size is also now set based on the number of classes. Added support for merging the meta features with the original input features in StackingRegressor (via use_features_in_secondary ) like it is already supported in the other Stacking classes. ( #418 ) Added a support_only parameter to the association_rules function, which allows constructing association rules (based on the support metric only) for cropped input DataFrames that don't contain a complete set of antecedent and consequent support values. ( #421 )","title":"New Features"},{"location":"CHANGELOG/#changes_1","text":"Itemsets generated with apriori are now frozensets ( #393 by William Laney and #394 ) Now raises an error if an input DataFrame to apriori contains values other than 0, 1, True, and False. ( #419 )","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_1","text":"Allow mlxtend estimators to be cloned via scikit-learn's clone function. ( #374 ) Fixed a bug to allow the correct use of refit=False in StackingRegressor and StackingCVRegressor ( #384 and #385 by selay01 ) Allow StackingClassifier to work with sparse matrices when use_features_in_secondary=True ( #408 by Floris Hoogenboom ) Allow StackingCVRegressor to work with sparse matrices when use_features_in_secondary=True ( #416 ) Allow StackingCVClassifier to work with sparse matrices when use_features_in_secondary=True ( #417 )","title":"Bug Fixes"},{"location":"CHANGELOG/#version-0120-2018-21-04","text":"","title":"Version 0.12.0 (2018-04-21)"},{"location":"CHANGELOG/#downloads_2","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_2","text":"A new feature_importance_permutation function to compute the feature importance in classifiers and regressors via the permutation importance method ( #358 ) The fit method of the ExhaustiveFeatureSelector now optionally accepts **fit_params for the estimator that is used for the feature selection. ( #354 by Zach Griffith ) The fit method of the SequentialFeatureSelector now optionally accepts **fit_params for the estimator that is used for the feature selection. ( #350 by Zach Griffith )","title":"New Features"},{"location":"CHANGELOG/#changes_2","text":"Replaced the plot_decision_regions colors with a colorblind-friendly palette and added contour lines for decision regions. ( #348 ) All stacking estimators now raise NotFittedErrors if any method for inference is called prior to fitting the estimator. ( #353 ) Renamed the refit parameter of both the StackingClassifier and StackingCVClassifier to use_clones to be more explicit and less misleading. ( #368 )","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_2","text":"Various changes in the documentation and documentation tools to fix formatting issues ( #363 ) Fixed a bug where the StackingCVClassifier 's meta features were not stored in the original order when shuffle=True ( #370 ) Many documentation improvements, including links to the User Guides in the API docs ( #371 )","title":"Bug Fixes"},{"location":"CHANGELOG/#version-0110-2018-03-14","text":"","title":"Version 0.11.0 (2018-03-14)"},{"location":"CHANGELOG/#downloads_3","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_3","text":"New function implementing the resampled paired t-test procedure ( paired_ttest_resampled ) to compare the performance of two models.
( #323 ) New function implementing the k-fold paired t-test procedure ( paired_ttest_kfold_cv ) to compare the performance of two models (also called k-hold-out paired t-test). ( #324 ) New function implementing the 5x2cv paired t-test procedure ( paired_ttest_5x2cv ) proposed by Dietterich (1998) to compare the performance of two models. ( #325 ) A refit parameter was added to stacking classes (similar to the refit parameter in the EnsembleVoteClassifier ), to support classifiers and regressors that follow the scikit-learn API but are not compatible with scikit-learn's clone function. ( #322 ) The ColumnSelector now has a drop_axis argument to use it in pipelines with CountVectorizers . ( #333 )","title":"New Features"},{"location":"CHANGELOG/#changes_3","text":"Raises an informative error message if predict or predict_meta_features is called prior to calling the fit method in StackingRegressor and StackingCVRegressor . ( #315 ) The plot_decision_regions function now automatically determines the optimal grid resolution based on the feature dimensions and supports anti-aliasing. The old res parameter has been deprecated. ( #309 by Guillaume Poirier-Morency ) Apriori code is faster due to optimizations in the one-hot transformation and the number of candidates generated by the apriori algorithm. ( #327 by Jakub Smid ) The OnehotTransactions class (which is often used in combination with the apriori function for association rule mining) is now more memory efficient as it uses boolean arrays instead of integer arrays. In addition, the OnehotTransactions class can now be provided with a sparse argument to generate sparse representations of the onehot matrix to further improve memory efficiency. ( #328 by Jakub Smid ) The OnehotTransactions class has been deprecated and replaced by the TransactionEncoder . ( #332 ) The plot_decision_regions function now has three new parameters, scatter_kwargs , contourf_kwargs , and scatter_highlight_kwargs , that can be used to modify the plotting style. ( #342 by James Bourbeau )","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_3","text":"Fixed an issue when class labels were provided to the EnsembleVoteClassifier when refit was set to False . ( #322 ) Allow arrays with 16-bit and 32-bit precision in plot_decision_regions function. ( #337 ) Fixed a bug that raised an indexing error if the number of items was <= 1 when computing association rules using the conviction metric. ( #340 )","title":"Bug Fixes"},{"location":"CHANGELOG/#version-0100-2017-12-22","text":"","title":"Version 0.10.0 (2017-12-22)"},{"location":"CHANGELOG/#downloads_4","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_4","text":"New store_train_meta_features parameter for fit in StackingCVRegressor . If True, train meta-features are stored in self.train_meta_features_ . New pred_meta_features method for StackingCVRegressor ; test meta-features can be obtained via this method. ( #294 via takashioya ) The new store_train_meta_features attribute and pred_meta_features method for the StackingCVRegressor were also added to the StackingRegressor , StackingClassifier , and StackingCVClassifier ( #299 & #300 ) New function ( evaluate.mcnemar_tables ) for creating multiple 2x2 contingency tables from model prediction arrays that can be used in multiple McNemar (post-hoc) tests or Cochran's Q or F tests, etc. ( #307 ) New function ( evaluate.cochrans_q ) for performing Cochran's Q test to compare the accuracy of multiple classifiers.
( #310 )","title":"New Features"},{"location":"CHANGELOG/#changes_4","text":"Added requirements.txt to setup.py . ( #304 via Colin Carroll )","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_4","text":"Improved numerical stability for p-values computed via the exact McNemar test ( #306 ) nose is no longer required to use the library ( #302 )","title":"Bug Fixes"},{"location":"CHANGELOG/#version-091-2017-11-19","text":"","title":"Version 0.9.1 (2017-11-19)"},{"location":"CHANGELOG/#downloads_5","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_5","text":"Added mlxtend.evaluate.bootstrap_point632_score to evaluate the performance of estimators using the .632 bootstrap. ( #283 ) New max_len parameter for the frequent itemset generation via the apriori function to allow for early stopping. ( #270 )","title":"New Features"},{"location":"CHANGELOG/#changes_5","text":"All feature index tuples in SequentialFeatureSelector are now in sorted order. ( #262 ) The SequentialFeatureSelector now runs the continuation of the floating inclusion/exclusion as described in Novovicova & Kittler (1994). Note that this didn't cause any difference in performance on any of the test scenarios but could lead to better performance in certain edge cases. ( #262 ) utils.Counter now accepts a name variable to help distinguish between multiple counters; time precision can be set with the 'precision' kwarg, and the new attribute end_time holds the time the last iteration completed. ( #278 via Mathew Savage )","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_5","text":"Fixed a deprecation error that occurred with the McNemar test when using SciPy 1.0. ( #283 )","title":"Bug Fixes"},{"location":"CHANGELOG/#version-090-2017-10-21","text":"","title":"Version 0.9.0 (2017-10-21)"},{"location":"CHANGELOG/#downloads_6","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_6","text":"Added evaluate.permutation_test , a permutation test for hypothesis testing (or A/B testing) to test if two samples come from the same distribution; in other words, a procedure to test the null hypothesis that two groups are not significantly different (e.g., a treatment and a control group). ( #250 ) Added 'leverage' and 'conviction' as evaluation metrics to the frequent_patterns.association_rules function. ( #246 & #247 ) Added a loadings_ attribute to PrincipalComponentAnalysis to compute the factor loadings of the features on the principal components. ( #251 ) Allow grid search over classifiers/regressors in ensemble and stacking estimators. ( #259 ) New make_multiplexer_dataset function that creates a dataset generated by an n-bit Boolean multiplexer for evaluating supervised learning algorithms. ( #263 ) Added a new BootstrapOutOfBag class, an implementation of the out-of-bag bootstrap to evaluate supervised learning algorithms. ( #265 ) The parameters for StackingClassifier , StackingCVClassifier , StackingRegressor , StackingCVRegressor , and EnsembleVoteClassifier can now be tuned using scikit-learn's GridSearchCV ( #254 via James Bourbeau )","title":"New Features"},{"location":"CHANGELOG/#changes_6","text":"The 'support' column returned by frequent_patterns.association_rules was changed to compute the support of \"antecedant union consequent\", and new 'antecedant support' and 'consequent support' columns were added to avoid ambiguity.
( #245 ) Allow the OnehotTransactions to be cloned via scikit-learn's clone function, which is required by, e.g., scikit-learn's FeatureUnion or GridSearchCV (via Iaroslav Shcherbatyi ). ( #249 )","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_6","text":"Fix issues with the self._init_time parameter in _IterativeModel subclasses. ( #256 ) Fix an imprecision bug that occurred in plot_ecdf when run on Python 2.7. ( #264 ) The vectors from SVD in PrincipalComponentAnalysis are now scaled so that solver='eigen' and solver='svd' store eigenvalues that have the same magnitudes. ( #251 )","title":"Bug Fixes"},{"location":"CHANGELOG/#version-080-2017-09-09","text":"","title":"Version 0.8.0 (2017-09-09)"},{"location":"CHANGELOG/#downloads_7","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_7","text":"Added mlxtend.evaluate.bootstrap , which implements the ordinary nonparametric bootstrap to bootstrap a single statistic (for example, the mean, median, R^2 of a regression fit, and so forth) #232 The SequentialFeatureSelector 's k_features parameter now accepts a string argument \"best\" or \"parsimonious\" for more \"automated\" feature selection. For instance, if \"best\" is provided, the feature selector will return the feature subset with the best cross-validation performance. If \"parsimonious\" is provided as an argument, the smallest feature subset that is within one standard error of the cross-validation performance will be selected. #238","title":"New Features"},{"location":"CHANGELOG/#changes_7","text":"SequentialFeatureSelector now uses np.nanmean instead of the regular mean to support scorers that may return np.nan #211 (via mrkaiser ) The skip_if_stuck parameter was removed from SequentialFeatureSelector in favor of a more efficient implementation that compares the conditional inclusion/exclusion results (in the floating versions) to the performances of previously sampled feature sets that were cached #237 ExhaustiveFeatureSelector was modified to consume substantially less memory #195 (via Adam Erickson )","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_7","text":"Fixed a bug where the SequentialFeatureSelector selected a feature subset larger than the max-value specified via the k_features tuple #213","title":"Bug Fixes"},{"location":"CHANGELOG/#version-070-2017-06-22","text":"","title":"Version 0.7.0 (2017-06-22)"},{"location":"CHANGELOG/#downloads_8","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_8","text":"New mlxtend.plotting.ecdf function for plotting empirical cumulative distribution functions ( #196 ). New StackingCVRegressor for stacking regressors with out-of-fold predictions to prevent overfitting ( #201 via Eike Dehling ).","title":"New Features"},{"location":"CHANGELOG/#changes_8","text":"The TensorFlow estimators have been removed from mlxtend, since TensorFlow now provides very convenient ways to build estimators, which renders those implementations obsolete. plot_decision_regions now supports plotting decision regions for more than 2 training features ( #189 , via James Bourbeau ). Parallel execution in mlxtend.feature_selection.SequentialFeatureSelector and mlxtend.feature_selection.ExhaustiveFeatureSelector is now performed over different feature subsets instead of the different cross-validation folds to better utilize machines with multiple processors if the number of features is large ( #193 , via @whalebot-helmsman ; see the sketch below).
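A minimal sketch of the parallelized exhaustive search described in the previous item, assuming the current ExhaustiveFeatureSelector API (n_jobs=-1 uses all available processors):

```python
from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)

# evaluates every feature subset of size 1-3; the work is
# distributed over the candidate subsets when n_jobs=-1
efs = EFS(LogisticRegression(),
          min_features=1,
          max_features=3,
          scoring='accuracy',
          cv=3,
          n_jobs=-1)
efs = efs.fit(X, y)
print(efs.best_idx_, efs.best_score_)
```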
Raise meaningful error messages if pandas DataFrames or Python lists of lists are fed into the StackingCVClassifier as fit arguments ( #198 ). The n_folds parameter of the StackingCVClassifier was changed to cv and can now accept any kind of cross-validation technique that is available from scikit-learn. For example, StackingCVClassifier(..., cv=StratifiedKFold(n_splits=3)) or StackingCVClassifier(..., cv=GroupKFold(n_splits=3)) ( #203 , via Konstantinos Paliouras ).","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_8","text":"SequentialFeatureSelector now correctly accepts a None argument for the scoring parameter to infer the default scoring metric from scikit-learn classifiers and regressors ( #171 ). The plot_decision_regions function now supports pre-existing axes objects generated via matplotlib's plt.subplots . ( #184 , see example ) Made math.num_combinations and math.num_permutations numerically stable for large numbers of combinations and permutations ( #200 ).","title":"Bug Fixes"},{"location":"CHANGELOG/#version-060-2017-03-18","text":"","title":"Version 0.6.0 (2017-03-18)"},{"location":"CHANGELOG/#downloads_9","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_9","text":"An association_rules function is implemented that allows generating rules based on a list of frequent itemsets (via Joshua Goerner ).","title":"New Features"},{"location":"CHANGELOG/#changes_9","text":"Adds a black edgecolor to plots via plotting.plot_decision_regions to make markers more distinguishable from the background in matplotlib>=2.0 . The association submodule was renamed to frequent_patterns .","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_9","text":"The DataFrame index of apriori results is now unique and ordered. Fixed typos in the autompg and wine datasets (via James Bourbeau ).","title":"Bug Fixes"},{"location":"CHANGELOG/#version-051-2017-02-14","text":"","title":"Version 0.5.1 (2017-02-14)"},{"location":"CHANGELOG/#downloads_10","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_10","text":"The EnsembleVoteClassifier has a new refit attribute that prevents refitting classifiers if refit=False to save computational time. Added a new lift_score function in evaluate to compute lift score (via Batuhan Bardak ). StackingClassifier and StackingRegressor support multivariate targets if the underlying models do (via kernc ). StackingClassifier has a new use_features_in_secondary attribute like StackingCVClassifier .","title":"New Features"},{"location":"CHANGELOG/#changes_10","text":"Changed the default verbosity level in SequentialFeatureSelector to 0 The EnsembleVoteClassifier now raises a NotFittedError if the estimator wasn't fit before calling predict . (via Anton Loss ) Added new TensorFlow variable initialization syntax to guarantee compatibility with TensorFlow 1.0","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_10","text":"Fixed a wrong default value for k_features in SequentialFeatureSelector Cast selected feature subsets in the SequentialFeatureSelector as sets to prevent the iterator from getting stuck if the k_idx are different permutations of the same combination (via Zac Wellmer ).
Fixed an issue with learning curves that caused the performance metrics to be reversed (via ipashchenko ) Fixed a bug that could occur in the SequentialFeatureSelector if there are similarly well-performing subsets in the floating variants (via Zac Wellmer ).","title":"Bug Fixes"},{"location":"CHANGELOG/#version-050-2016-11-09","text":"","title":"Version 0.5.0 (2016-11-09)"},{"location":"CHANGELOG/#downloads_11","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_11","text":"New ExhaustiveFeatureSelector estimator in mlxtend.feature_selection for evaluating all feature combinations in a specified range The StackingClassifier has a new parameter average_probas that is set to True by default to maintain the current behavior. A deprecation warning was added, though, and it will default to False in future releases (0.6.0); average_probas=False will result in stacking of the level-1 predicted probabilities rather than averaging these. New StackingCVClassifier estimator in 'mlxtend.classifier' for implementing a stacking ensemble that uses cross-validation techniques for training the meta-estimator to avoid overfitting ( Reiichiro Nakano ) New OnehotTransactions encoder class added to the preprocessing submodule for transforming transaction data into a one-hot encoded array The SequentialFeatureSelector estimator in mlxtend.feature_selection is now safely stoppable mid-process by control+c, and print_progress was deprecated in favor of a more tunable verbose parameter ( Will McGinnis ) New apriori function in association to extract frequent itemsets from transaction data for association rule mining New checkerboard_plot function in plotting to plot checkerboard tables / heat maps New mcnemar_table and mcnemar functions in evaluate to compute 2x2 contingency tables and McNemar's test","title":"New Features"},{"location":"CHANGELOG/#changes_11","text":"All plotting functions have been moved to mlxtend.plotting for compatibility reasons with continuous integration services and to make the installation of matplotlib optional for users of mlxtend 's core functionality Added a compatibility layer for scikit-learn 0.18 using the new model_selection module while maintaining backwards compatibility to scikit-learn 0.17.","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_11","text":"mlxtend.plotting.plot_decision_regions now draws decision regions correctly if more than 4 class labels are present Raise AttributeError in plot_decision_regions when the X_highlight argument is a 1D array ( chkoar )","title":"Bug Fixes"},{"location":"CHANGELOG/#version-042-2016-08-24","text":"","title":"Version 0.4.2 (2016-08-24)"},{"location":"CHANGELOG/#downloads_12","text":"Source code (zip) Source code (tar.gz) PDF documentation","title":"Downloads"},{"location":"CHANGELOG/#new-features_12","text":"Added preprocessing.CopyTransformer , a mock class that returns copies of input arrays via transform and fit_transform","title":"New Features"},{"location":"CHANGELOG/#changes_12","text":"Added AppVeyor to CI to ensure MS Windows compatibility Datasets are now saved as compressed .txt or .csv files rather than being imported as Python objects feature_selection.SequentialFeatureSelector now supports the selection of k_features using a tuple to specify a \"min-max\" k_features range Added an \"SVD solver\" option to the PrincipalComponentAnalysis Raise an AttributeError with a \"not fitted\" message in SequentialFeatureSelector if transform or get_metric_dict are called prior to fit Use
small, positive bias units in TfMultiLayerPerceptron 's hidden layer(s) if the activations are ReLUs in order to avoid dead neurons Added an optional clone_estimator parameter to the SequentialFeatureSelector that defaults to True , avoiding the modification of the original estimator objects More rigorous type and shape checks in the evaluate.plot_decision_regions function DenseTransformer now doesn't raise an error if the input array is not sparse API clean-up using scikit-learn's BaseEstimator as the parent class for feature_selection.ColumnSelector","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_12","text":"Fixed a problem when a tuple-range was provided as an argument to the SequentialFeatureSelector 's k_features parameter and the scoring metric was more negative than -1 (e.g., as in scikit-learn's MSE scoring function) (via wahutch ) Fixed an AttributeError issue when verbose > 1 in StackingClassifier Fixed a bug in classifier.SoftmaxRegression where the mean values of the offsets were used to update the bias units rather than their sum Fixed a rare bug in the MLP _layer_mapping functions that caused the random number generation seeds to be swapped when initializing weights and biases","title":"Bug Fixes"},{"location":"CHANGELOG/#version-041-2016-05-01","text":"","title":"Version 0.4.1 (2016-05-01)"},{"location":"CHANGELOG/#downloads_13","text":"Source code (zip) Source code (tar.gz) PDF documentation","title":"Downloads"},{"location":"CHANGELOG/#new-features_13","text":"New TensorFlow estimator for Linear Regression ( tf_regressor.TfLinearRegression ) New k-means clustering estimator ( cluster.Kmeans ) New TensorFlow k-means clustering estimator ( tf_cluster.Kmeans )","title":"New Features"},{"location":"CHANGELOG/#changes_13","text":"Due to refactoring of the estimator classes, the init_weights parameter of the fit methods was globally renamed to init_params Overall performance improvements of estimators due to code clean-up and refactoring Added several additional checks for correct array types and more meaningful exception messages Added optional dropout to the tf_classifier.TfMultiLayerPerceptron classifier for regularization Added an optional decay parameter to the tf_classifier.TfMultiLayerPerceptron classifier for adaptive learning via an exponential decay of the learning rate eta Replaced the old NeuralNetMLP by the more streamlined MultiLayerPerceptron ( classifier.MultiLayerPerceptron ); now also with softmax in the output layer and categorical cross-entropy loss.
Unified the init_params parameter for fit functions to continue training where the algorithm left off (if supported)","title":"Changes"},{"location":"CHANGELOG/#version-040-2016-04-09","text":"","title":"Version 0.4.0 (2016-04-09)"},{"location":"CHANGELOG/#new-features_14","text":"New TfSoftmaxRegression classifier using TensorFlow ( tf_classifier.TfSoftmaxRegression ) New SoftmaxRegression classifier ( classifier.SoftmaxRegression ) New TfMultiLayerPerceptron classifier using TensorFlow ( tf_classifier.TfMultiLayerPerceptron ) New StackingRegressor ( regressor.StackingRegressor ) New StackingClassifier ( classifier.StackingClassifier ) New function for one-hot encoding of class labels ( preprocessing.one_hot ) Added GridSearch support to the SequentialFeatureSelector ( feature_selection.SequentialFeatureSelector ) evaluate.plot_decision_regions improvements: Function now handles y-class labels correctly if the array is of type float Correct handling of input arguments markers and colors Accept an existing Axes via the ax argument New print_progress parameter for all generalized models and multi-layer neural networks for printing the time elapsed, ETA, and the current cost of the current epoch Minibatch learning for classifier.LogisticRegression , classifier.Adaline , and regressor.LinearRegression plus a streamlined API New Principal Component Analysis class via mlxtend.feature_extraction.PrincipalComponentAnalysis New RBF Kernel Principal Component Analysis class via mlxtend.feature_extraction.RBFKernelPCA New Linear Discriminant Analysis class via mlxtend.feature_extraction.LinearDiscriminantAnalysis","title":"New Features"},{"location":"CHANGELOG/#changes_14","text":"The column parameter in mlxtend.preprocessing.standardize now defaults to None to standardize all columns more conveniently","title":"Changes"},{"location":"CHANGELOG/#version-030-2016-01-31","text":"","title":"Version 0.3.0 (2016-01-31)"},{"location":"CHANGELOG/#downloads_14","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_15","text":"Added a progress bar tracker to classifier.NeuralNetMLP Added a function to score predicted vs. target class labels ( evaluate.scoring ) Added confusion matrix functions to create ( evaluate.confusion_matrix ) and plot ( evaluate.plot_confusion_matrix ) confusion matrices New style parameter and improved axis scaling in mlxtend.evaluate.plot_learning_curves Added loadlocal_mnist to mlxtend.data for streaming MNIST from local byte files into NumPy arrays New NeuralNetMLP parameters: random_weights , shuffle_init , shuffle_epoch New SFS features such as the generation of pandas DataFrame results tables and plotting functions (with confidence intervals, standard deviation, and standard error bars) Added support for regression estimators in SFS Added the Boston housing dataset New shuffle parameter for classifier.NeuralNetMLP","title":"New Features"},{"location":"CHANGELOG/#changes_15","text":"The mlxtend.preprocessing.standardize function now optionally returns the parameters, which are estimated from the array, for re-use.
A further improvement makes the standardize function smarter in order to avoid zero-division errors Cosmetic improvements to the evaluate.plot_decision_regions function such as hiding plot axes Renaming of classifier.EnsembleClassifier to classifier.EnsembleVoteClassifier Improved random weight initialization in Perceptron , Adaline , LinearRegression , and LogisticRegression Changed the learning parameter of mlxtend.classifier.Adaline to solver and added \"normal equation\" as a closed-form solution solver Hide y-axis labels in mlxtend.evaluate.plot_decision_regions in 1-dimensional evaluations Sequential Feature Selection algorithms were unified into a single SequentialFeatureSelector class with parameters to enable floating selection and toggle between forward and backward selection. Stratified sampling of MNIST (now 500x random samples from each of the 10 digit categories) Renaming mlxtend.plotting to mlxtend.general_plotting in order to distinguish general plotting functions from specialized utility functions such as evaluate.plot_decision_regions","title":"Changes"},{"location":"CHANGELOG/#version-029-2015-07-14","text":"","title":"Version 0.2.9 (2015-07-14)"},{"location":"CHANGELOG/#downloads_15","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_16","text":"Sequential Feature Selection algorithms: SFS, SFFS, SBS, and SFBS","title":"New Features"},{"location":"CHANGELOG/#changes_16","text":"Changed the regularization & lambda parameters in LogisticRegression to a single parameter l2_lambda","title":"Changes"},{"location":"CHANGELOG/#version-028-2015-06-27","text":"API changes: mlxtend.sklearn.EnsembleClassifier -> mlxtend.classifier.EnsembleClassifier mlxtend.sklearn.ColumnSelector -> mlxtend.feature_selection.ColumnSelector mlxtend.sklearn.DenseTransformer -> mlxtend.preprocessing.DenseTransformer mlxtend.pandas.standardizing -> mlxtend.preprocessing.standardizing mlxtend.pandas.minmax_scaling -> mlxtend.preprocessing.minmax_scaling mlxtend.matplotlib -> mlxtend.plotting Added a momentum learning parameter (alpha coefficient) to mlxtend.classifier.NeuralNetMLP . Added an adaptive learning rate (decrease constant) to mlxtend.classifier.NeuralNetMLP . mlxtend.pandas.minmax_scaling became mlxtend.preprocessing.minmax_scaling and also supports NumPy arrays now mlxtend.pandas.standardizing became mlxtend.preprocessing.standardizing and now supports both NumPy arrays and pandas DataFrames; also, a ddof parameter was added to set the degrees of freedom when calculating the standard deviation","title":"Version 0.2.8 (2015-06-27)"},{"location":"CHANGELOG/#version-027-2015-06-20","text":"Added a multilayer perceptron (feedforward artificial neural network) classifier as mlxtend.classifier.NeuralNetMLP . Added 5000 labeled training samples from the MNIST handwritten digits dataset to mlxtend.data","title":"Version 0.2.7 (2015-06-20)"},{"location":"CHANGELOG/#version-026-2015-05-08","text":"Added ordinary least squares regression using different solvers (gradient descent, stochastic gradient descent, and the closed-form solution (normal equation)) Added an option for random weight initialization to the logistic regression classifier and updated l2 regularization Added the wine dataset to mlxtend.data Added an invert_axes parameter to mlxtend.matplotlib.enrichment_plot to optionally plot the \"Count\" on the x-axis New verbose parameter for mlxtend.sklearn.EnsembleClassifier by Alejandro C.
Bahnsen Added mlxtend.pandas.standardizing to standardize columns in a Pandas DataFrame Added parameters linestyles and markers to mlxtend.matplotlib.enrichment_plot mlxtend.regression.lin_regplot automatically adds np.newaxis and works with Python lists Added tokenizers: mlxtend.text.extract_emoticons and mlxtend.text.extract_words_and_emoticons","title":"Version 0.2.6 (2015-05-08)"},{"location":"CHANGELOG/#version-025-2015-04-17","text":"Added Sequential Backward Selection (mlxtend.sklearn.SBS) Added X_highlight parameter to mlxtend.evaluate.plot_decision_regions for highlighting test data points. Added mlxtend.regression.lin_regplot to plot the fitted line from linear regression. Added mlxtend.matplotlib.stacked_barplot to conveniently produce stacked barplots using pandas DataFrames . Added mlxtend.matplotlib.enrichment_plot","title":"Version 0.2.5 (2015-04-17)"},{"location":"CHANGELOG/#version-024-2015-03-15","text":"Added scoring to mlxtend.evaluate.learning_curves (by user pfsq) Fixed setup.py bug caused by the missing README.html file Added matplotlib.category_scatter for pandas DataFrames and NumPy arrays","title":"Version 0.2.4 (2015-03-15)"},{"location":"CHANGELOG/#version-023-2015-03-11","text":"Added Logistic regression Gradient descent and stochastic gradient descent perceptrons were changed to Adaline (Adaptive Linear Neuron) Perceptron and Adaline for {0, 1} classes Added mlxtend.preprocessing.shuffle_arrays_unison function to shuffle one or more NumPy arrays. Added shuffle and random seed parameters to the stochastic gradient descent classifier. Added rstrip parameter to mlxtend.file_io.find_filegroups to allow trimming of base names. Added ignore_substring parameter to mlxtend.file_io.find_filegroups and find_files . Replaced .rstrip in mlxtend.file_io.find_filegroups with more robust regex. GridSearch support for mlxtend.sklearn.EnsembleClassifier","title":"Version 0.2.3 (2015-03-11)"},{"location":"CHANGELOG/#version-022-2015-03-01","text":"Improved robustness of EnsembleClassifier. Extended plot_decision_regions() functionality for plotting 1D decision boundaries. Function matplotlib.plot_decision_regions was reorganized to evaluate.plot_decision_regions . evaluate.plot_learning_curves() function added. Added Rosenblatt, gradient descent, and stochastic gradient descent perceptrons.","title":"Version 0.2.2 (2015-03-01)"},{"location":"CHANGELOG/#version-021-2015-01-20","text":"Added mlxtend.pandas.minmax_scaling - a function to rescale pandas DataFrame columns. Slight update to the EnsembleClassifier interface (additional voting parameter) Fixed EnsembleClassifier to return correct class labels if class labels are not integers from 0 to n. Added new matplotlib function to plot decision regions of classifiers.","title":"Version 0.2.1 (2015-01-20)"},{"location":"CHANGELOG/#version-020-2015-01-13","text":"Improved mlxtend.text.generalize_duplcheck to remove duplicates and prevent an endless looping issue. Added recursive search parameter to mlxtend.file_io.find_files. Added check_ext parameter to mlxtend.file_io.find_files to search based on file extensions. Default parameter to ignore invisible files for mlxtend.file_io.find. Added transform and fit_transform to the EnsembleClassifier .
Added mlxtend.file_io.find_filegroups function.","title":"Version 0.2.0 (2015-01-13)"},{"location":"CHANGELOG/#version-019-2015-01-10","text":"Implemented scikit-learn EnsembleClassifier (majority voting rule) class.","title":"Version 0.1.9 (2015-01-10)"},{"location":"CHANGELOG/#version-018-2015-01-07","text":"Improvements to mlxtend.text.generalize_names to handle certain Dutch last name prefixes (van, van der, de, etc.). Added mlxtend.text.generalize_name_duplcheck function to apply mlxtend.text.generalize_names function to a pandas DataFrame without creating duplicates.","title":"Version 0.1.8 (2015-01-07)"},{"location":"CHANGELOG/#version-017-2015-01-07","text":"Added text utilities with a name generalization function. Added file_io utilities.","title":"Version 0.1.7 (2015-01-07)"},{"location":"CHANGELOG/#version-016-2015-01-04","text":"Added combinations and permutations estimators.","title":"Version 0.1.6 (2015-01-04)"},{"location":"CHANGELOG/#version-015-2014-12-11","text":"Added DenseTransformer for pipelines and grid search.","title":"Version 0.1.5 (2014-12-11)"},{"location":"CHANGELOG/#version-014-2014-08-20","text":"The mean_centering function is now a class that creates MeanCenterer objects, which can be used to fit data via the fit method and center data at the column means via the transform and fit_transform methods.","title":"Version 0.1.4 (2014-08-20)"},{"location":"CHANGELOG/#version-013-2014-08-19","text":"Added preprocessing module and mean_centering function.","title":"Version 0.1.3 (2014-08-19)"},{"location":"CHANGELOG/#version-012-2014-08-19","text":"Added matplotlib utilities and remove_borders function.","title":"Version 0.1.2 (2014-08-19)"},{"location":"CHANGELOG/#version-011-2014-08-13","text":"Simplified code for ColumnSelector.","title":"Version 0.1.1 (2014-08-13)"},{"location":"CONTRIBUTING/","text":"How to Contribute I would be very happy about any kind of contributions that help to improve and extend the functionality of mlxtend. Quick Contributor Checklist This is a quick checklist about the different steps of a typical contribution to mlxtend (and other open source projects). Consider copying this list to a local text file (or the issue tracker) and checking off items as you go. [ ] Open a new \"issue\" on GitHub to discuss the new feature / bug fix [ ] Fork the mlxtend repository from GitHub (if not already done earlier) [ ] Create and check out a new topic branch (please don't make modifications in the master branch) [ ] Implement the new feature or apply the bug-fix [ ] Add appropriate unit test functions in mlxtend/*/tests [ ] Run nosetests ./mlxtend -sv and make sure that all unit tests pass [ ] Check/improve the test coverage by running nosetests ./mlxtend --with-coverage [ ] Check for style issues by running flake8 ./mlxtend (you may want to run nosetests again after you made modifications to the code) [ ] Add a note about the modification/contribution to the ./docs/sources/changelog.md file [ ] Modify documentation in the appropriate location under mlxtend/docs/sources/ [ ] Push the topic branch to the server and create a pull request [ ] Check the Travis-CI build passed at https://travis-ci.org/rasbt/mlxtend [ ] Check/improve the unit test coverage at https://coveralls.io/github/rasbt/mlxtend [ ] Check/improve the code health at https://landscape.io/github/rasbt/mlxtend Tips for Contributors Getting Started - Creating a New Issue and Forking the Repository If you don't have a GitHub account yet, please create one to contribute to this project.
Please submit a ticket for your issue to discuss the fix or new feature before too much time and effort is spent on the implementation. Fork the mlxtend repository from the GitHub web interface. Clone the mlxtend repository to your local machine by executing git clone https://github.com//mlxtend.git Syncing an Existing Fork If you already forked mlxtend earlier, you can bring your \"Fork\" up to date with the master branch as follows: 1. Configuring a remote that points to the upstream repository on GitHub List the current configured remote repository of your fork by executing $ git remote -v If you see something like origin https://github.com//mlxtend.git (fetch) origin https://github.com//mlxtend.git (push) you need to specify a new remote upstream repository via $ git remote add upstream https://github.com/rasbt/mlxtend.git Now, verify the new upstream repository you've specified for your fork by executing $ git remote -v You should see the following output if everything is configured correctly: origin https://github.com//mlxtend.git (fetch) origin https://github.com//mlxtend.git (push) upstream https://github.com/rasbt/mlxtend.git (fetch) upstream https://github.com/rasbt/mlxtend.git (push) 2. Syncing your Fork First, fetch the updates of the original project's master branch by executing: $ git fetch upstream You should see the following output remote: Counting objects: xx, done. remote: Compressing objects: 100% (xx/xx), done. remote: Total xx (delta xx), reused xx (delta x) Unpacking objects: 100% (xx/xx), done. From https://github.com/rasbt/mlxtend * [new branch] master -> upstream/master This means that the commits to the rasbt/mlxtend master branch are now stored in the local branch upstream/master . If you are not already on your local project's master branch, execute $ git checkout master Finally, merge the changes in upstream/master to your local master branch by executing $ git merge upstream/master which will give you an output that looks similar to Updating xxx...xxx Fast-forward SOME FILE1 | 12 +++++++ SOME FILE2 | 10 +++++++ 2 files changed, 22 insertions(+), The Main Workflow - Making Changes in a New Topic Branch Listed below are the 9 typical steps of a contribution. 1. Discussing the Feature or Modification Before you start coding, please discuss the new feature, bugfix, or other modification to the project on the project's issue tracker . Before you open a \"new issue,\" please do a quick search to see if a similar issue has been submitted already. 2. Creating a new feature branch Please avoid working directly on the master branch but create a new feature branch: $ git branch Switch to the new feature branch by executing $ git checkout 3. Developing the new feature / bug fix Now it's time to modify existing code or to contribute new code to the project. 4. Testing your code Add the respective unit tests and check if they pass: $ nosetests -sv Use the --with-coverage flag to ensure that all code is being covered in the unit tests: $ nosetests --with-coverage 5. Documenting changes Please add an entry to the mlxtend/docs/sources/changelog.md file. If it is a new feature, it would also be nice if you could update the documentation in the appropriate location in mlxtend/docs/sources . 6. Committing changes When you are ready to commit the changes, please provide a meaningful commit message: $ git add # or `git add .` $ git commit -m '' 7. Optional: squashing commits If you made multiple smaller commits, it would be nice if you could group them into a larger, summarizing commit.
Note: Due to the improved GitHub UI, this is no longer necessary/encouraged. First, list your recent commits via $ git log , which will list the commits from newest to oldest in the following format by default: commit 046e3af8a9127df8eac879454f029937c8a31c41 Author: rasbt Date: Tue Nov 24 03:46:37 2015 -0500 fixed setup.py commit c3c00f6ba0e8f48bbe1c9081b8ae3817e57ecc5c Author: rasbt Date: Tue Nov 24 03:04:39 2015 -0500 documented feature x commit d87934fe8726c46f0b166d6290a3bf38915d6e75 Author: rasbt Date: Tue Nov 24 02:44:45 2015 -0500 added support for feature x Assuming that it would make sense to group these 3 commits into one, we can execute $ git rebase -i HEAD~3 which will bring up our default git editor with the following contents: pick d87934f added support for feature x pick c3c00f6 documented feature x pick 046e3af fixed setup.py Since c3c00f6 and 046e3af are related to the original commit of feature x , let's keep d87934f and squash the 2 following commits into this initial one by changing the lines to pick d87934f added support for feature x squash c3c00f6 documented feature x squash 046e3af fixed setup.py Now, save the changes in your editor; quitting the editor will then apply the rebase changes, and the editor will open a second time, prompting you to enter a new commit message. In this case, we could enter support for feature x to summarize the contributions. 8. Uploading changes Push your changes in the topic branch to the git server by executing: $ git push origin 9. Submitting a pull request Go to your GitHub repository online, select the new feature branch, and submit a new pull request. Notes for Developers Building the documentation The documentation is built via MkDocs ; to ensure that the documentation is rendered correctly, you can view the documentation locally by executing mkdocs serve from the mlxtend/docs directory. For example, ~/github/mlxtend/docs$ mkdocs serve 1. Building the API documentation To build the API documentation, navigate to mlxtend/docs and execute the make_api.py file from this directory via ~/github/mlxtend/docs$ python make_api.py This should place the API documentation into the two directories: mlxtend/docs/sources/api_modules mlxtend/docs/sources/api_subpackages 2. Editing the User Guide The documents containing code examples for the \"User Guide\" are generated from IPython Notebook files. To convert an IPython notebook file to markdown after editing, please follow these steps: Modify or edit the existing notebook. Execute all cells in the current notebook and make sure that no errors occur. Convert the notebook to markdown using the ipynb2markdown.py converter ~/github/mlxtend/docs$ python ipynb2markdown.py --ipynb_path ./sources/user_guide/subpackage/notebookname.ipynb Note If you are adding a new document, please also include it in the pages section in the mlxtend/docs/mkdocs.yml file. 3. Building static HTML files of the documentation First, please check the documentation via localhost (http://127.0.0.1:8000/): ~/github/mlxtend/docs$ mkdocs serve Next, build the static HTML files of the mlxtend documentation via ~/github/mlxtend/docs$ mkdocs build --clean To deploy the documentation, execute ~/github/mlxtend/docs$ mkdocs gh-deploy --clean 4. Generate a PDF of the documentation To generate a PDF version of the documentation, simply cd into the mlxtend/docs directory and execute: python md2pdf.py Uploading a new version to PyPI 1.
Creating a new testing environment Assuming we are using conda , create a new Python environment via $ conda create -n 'mlxtend-testing' python=3 numpy scipy pandas Next, activate the environment by executing $ source activate mlxtend-testing 2. Installing the package from local files Test the installation by executing $ python setup.py install --record files.txt The --record files.txt flag will create a files.txt file listing the locations where these files will be installed. Try to import the package to see if it works, for example, by executing $ python -c 'import mlxtend; print(mlxtend.__file__)' If everything seems to be fine, remove the installation via $ cat files.txt | xargs rm -rf ; rm files.txt Next, test if pip is able to install the package. First, navigate to a different directory, and from there, install the package: $ pip install mlxtend and uninstall it again $ pip uninstall mlxtend 3. Deploying the package Consider deploying the package to the PyPI test server first. The setup instructions can be found here . $ python setup.py sdist bdist_wheel upload -r https://testpypi.python.org/pypi Test if it can be installed from there by executing $ pip install -i https://testpypi.python.org/pypi mlxtend and uninstall it $ pip uninstall mlxtend After this dry run succeeds, repeat this process using the \"real\" PyPI: $ python setup.py sdist bdist_wheel upload 4. Removing the virtual environment Finally, to clean up our local drive, remove the virtual testing environment via $ conda remove --name 'mlxtend-testing' --all 5. Updating the conda-forge recipe Once a new version of mlxtend has been uploaded to PyPI, update the conda-forge build recipe at https://github.com/conda-forge/mlxtend-feedstock by changing the version number in the recipe/meta.yaml file appropriately.","title":"How To Contribute"},{"location":"CONTRIBUTING/#how-to-contribute","text":"I would be very happy about any kind of contributions that help to improve and extend the functionality of mlxtend.","title":"How to Contribute"},{"location":"CONTRIBUTING/#quick-contributor-checklist","text":"This is a quick checklist about the different steps of a typical contribution to mlxtend (and other open source projects). Consider copying this list to a local text file (or the issue tracker) and checking off items as you go.
[ ] Open a new \"issue\" on GitHub to discuss the new feature / bug fix [ ] Fork the mlxtend repository from GitHub (if not already done earlier) [ ] Create and check out a new topic branch (please don't make modifications in the master branch) [ ] Implement the new feature or apply the bug-fix [ ] Add appropriate unit test functions in mlxtend/*/tests [ ] Run nosetests ./mlxtend -sv and make sure that all unit tests pass [ ] Check/improve the test coverage by running nosetests ./mlxtend --with-coverage [ ] Check for style issues by running flake8 ./mlxtend (you may want to run nosetests again after you made modifications to the code) [ ] Add a note about the modification/contribution to the ./docs/sources/changelog.md file [ ] Modify documentation in the appropriate location under mlxtend/docs/sources/ [ ] Push the topic branch to the server and create a pull request [ ] Check the Travis-CI build passed at https://travis-ci.org/rasbt/mlxtend [ ] Check/improve the unit test coverage at https://coveralls.io/github/rasbt/mlxtend [ ] Check/improve the code health at https://landscape.io/github/rasbt/mlxtend","title":"Quick Contributor Checklist"},{"location":"CONTRIBUTING/#tips-for-contributors","text":"","title":"Tips for Contributors"},{"location":"CONTRIBUTING/#getting-started-creating-a-new-issue-and-forking-the-repository","text":"If you don't have a GitHub account yet, please create one to contribute to this project. Please submit a ticket for your issue to discuss the fix or new feature before too much time and effort is spent on the implementation. Fork the mlxtend repository from the GitHub web interface. Clone the mlxtend repository to your local machine by executing git clone https://github.com//mlxtend.git","title":"Getting Started - Creating a New Issue and Forking the Repository"},{"location":"CONTRIBUTING/#syncing-an-existing-fork","text":"If you already forked mlxtend earlier, you can bring your \"Fork\" up to date with the master branch as follows:","title":"Syncing an Existing Fork"},{"location":"CONTRIBUTING/#1-configuring-a-remote-that-points-to-the-upstream-repository-on-github","text":"List the current configured remote repository of your fork by executing $ git remote -v If you see something like origin https://github.com//mlxtend.git (fetch) origin https://github.com//mlxtend.git (push) you need to specify a new remote upstream repository via $ git remote add upstream https://github.com/rasbt/mlxtend.git Now, verify the new upstream repository you've specified for your fork by executing $ git remote -v You should see the following output if everything is configured correctly: origin https://github.com//mlxtend.git (fetch) origin https://github.com//mlxtend.git (push) upstream https://github.com/rasbt/mlxtend.git (fetch) upstream https://github.com/rasbt/mlxtend.git (push)","title":"1. Configuring a remote that points to the upstream repository on GitHub"},{"location":"CONTRIBUTING/#2-syncing-your-fork","text":"First, fetch the updates of the original project's master branch by executing: $ git fetch upstream You should see the following output remote: Counting objects: xx, done. remote: Compressing objects: 100% (xx/xx), done. remote: Total xx (delta xx), reused xx (delta x) Unpacking objects: 100% (xx/xx), done. From https://github.com/rasbt/mlxtend * [new branch] master -> upstream/master This means that the commits to the rasbt/mlxtend master branch are now stored in the local branch upstream/master .
If you are not already on your local project's master branch, execute $ git checkout master Finally, merge the changes in upstream/master to your local master branch by executing $ git merge upstream/master which will give you an output that looks similar to Updating xxx...xxx Fast-forward SOME FILE1 | 12 +++++++ SOME FILE2 | 10 +++++++ 2 files changed, 22 insertions(+),","title":"2. Syncing your Fork"},{"location":"CONTRIBUTING/#the-main-workflow-making-changes-in-a-new-topic-branch","text":"Listed below are the 9 typical steps of a contribution.","title":"The Main Workflow - Making Changes in a New Topic Branch"},{"location":"CONTRIBUTING/#1-discussing-the-feature-or-modification","text":"Before you start coding, please discuss the new feature, bugfix, or other modification to the project on the project's issue tracker . Before you open a \"new issue,\" please do a quick search to see if a similar issue has been submitted already.","title":"1. Discussing the Feature or Modification"},{"location":"CONTRIBUTING/#2-creating-a-new-feature-branch","text":"Please avoid working directly on the master branch but create a new feature branch: $ git branch Switch to the new feature branch by executing $ git checkout ","title":"2. Creating a new feature branch"},{"location":"CONTRIBUTING/#3-developing-the-new-feature-bug-fix","text":"Now it's time to modify existing code or to contribute new code to the project.","title":"3. Developing the new feature / bug fix"},{"location":"CONTRIBUTING/#4-testing-your-code","text":"Add the respective unit tests and check if they pass: $ nosetests -sv Use the --with-coverage flag to ensure that all code is being covered in the unit tests: $ nosetests --with-coverage","title":"4. Testing your code"},{"location":"CONTRIBUTING/#5-documenting-changes","text":"Please add an entry to the mlxtend/docs/sources/changelog.md file. If it is a new feature, it would also be nice if you could update the documentation in the appropriate location in mlxtend/docs/sources .","title":"5. Documenting changes"},{"location":"CONTRIBUTING/#6-committing-changes","text":"When you are ready to commit the changes, please provide a meaningful commit message: $ git add # or `git add .` $ git commit -m ''","title":"6. Committing changes"},{"location":"CONTRIBUTING/#7-optional-squashing-commits","text":"If you made multiple smaller commits, it would be nice if you could group them into a larger, summarizing commit. Note: Due to the improved GitHub UI, this is no longer necessary/encouraged. First, list your recent commits via
$ git log , which will list the commits from newest to oldest in the following format by default: commit 046e3af8a9127df8eac879454f029937c8a31c41 Author: rasbt Date: Tue Nov 24 03:46:37 2015 -0500 fixed setup.py commit c3c00f6ba0e8f48bbe1c9081b8ae3817e57ecc5c Author: rasbt Date: Tue Nov 24 03:04:39 2015 -0500 documented feature x commit d87934fe8726c46f0b166d6290a3bf38915d6e75 Author: rasbt Date: Tue Nov 24 02:44:45 2015 -0500 added support for feature x Assuming that it would make sense to group these 3 commits into one, we can execute $ git rebase -i HEAD~3 which will bring up our default git editor with the following contents: pick d87934f added support for feature x pick c3c00f6 documented feature x pick 046e3af fixed setup.py Since c3c00f6 and 046e3af are related to the original commit of feature x , let's keep d87934f and squash the 2 following commits into this initial one by changing the lines to pick d87934f added support for feature x squash c3c00f6 documented feature x squash 046e3af fixed setup.py Now, save the changes in your editor; quitting the editor will then apply the rebase changes, and the editor will open a second time, prompting you to enter a new commit message. In this case, we could enter support for feature x to summarize the contributions.","title":"7. Optional: squashing commits"},{"location":"CONTRIBUTING/#8-uploading-changes","text":"Push your changes in the topic branch to the git server by executing: $ git push origin ","title":"8. Uploading changes"},{"location":"CONTRIBUTING/#9-submitting-a-pull-request","text":"Go to your GitHub repository online, select the new feature branch, and submit a new pull request.","title":"9. Submitting a pull request"},{"location":"CONTRIBUTING/#notes-for-developers","text":"","title":"Notes for Developers"},{"location":"CONTRIBUTING/#building-the-documentation","text":"The documentation is built via MkDocs ; to ensure that the documentation is rendered correctly, you can view the documentation locally by executing mkdocs serve from the mlxtend/docs directory. For example, ~/github/mlxtend/docs$ mkdocs serve","title":"Building the documentation"},{"location":"CONTRIBUTING/#1-building-the-api-documentation","text":"To build the API documentation, navigate to mlxtend/docs and execute the make_api.py file from this directory via ~/github/mlxtend/docs$ python make_api.py This should place the API documentation into the two directories: mlxtend/docs/sources/api_modules mlxtend/docs/sources/api_subpackages","title":"1. Building the API documentation"},{"location":"CONTRIBUTING/#2-editing-the-user-guide","text":"The documents containing code examples for the \"User Guide\" are generated from IPython Notebook files. To convert an IPython notebook file to markdown after editing, please follow these steps: Modify or edit the existing notebook. Execute all cells in the current notebook and make sure that no errors occur. Convert the notebook to markdown using the ipynb2markdown.py converter ~/github/mlxtend/docs$ python ipynb2markdown.py --ipynb_path ./sources/user_guide/subpackage/notebookname.ipynb Note If you are adding a new document, please also include it in the pages section in the mlxtend/docs/mkdocs.yml file.","title":"2.
Editing the User Guide"},{"location":"CONTRIBUTING/#3-building-static-html-files-of-the-documentation","text":"First, please check the documentation via localhost (http://127.0.0.1:8000/): ~/github/mlxtend/docs$ mkdocs serve Next, build the static HTML files of the mlxtend documentation via ~/github/mlxtend/docs$ mkdocs build --clean To deploy the documentation, execute ~/github/mlxtend/docs$ mkdocs gh-deploy --clean","title":"3. Building static HTML files of the documentation"},{"location":"CONTRIBUTING/#4-generate-a-pdf-of-the-documentation","text":"To generate a PDF version of the documentation, simply cd into the mlxtend/docs directory and execute: python md2pdf.py","title":"4. Generate a PDF of the documentation"},{"location":"CONTRIBUTING/#uploading-a-new-version-to-pypi","text":"","title":"Uploading a new version to PyPI"},{"location":"CONTRIBUTING/#1-creating-a-new-testing-environment","text":"Assuming we are using conda , create a new Python environment via $ conda create -n 'mlxtend-testing' python=3 numpy scipy pandas Next, activate the environment by executing $ source activate mlxtend-testing","title":"1. Creating a new testing environment"},{"location":"CONTRIBUTING/#2-installing-the-package-from-local-files","text":"Test the installation by executing $ python setup.py install --record files.txt The --record files.txt flag will create a files.txt file listing the locations where these files will be installed. Try to import the package to see if it works, for example, by executing $ python -c 'import mlxtend; print(mlxtend.__file__)' If everything seems to be fine, remove the installation via $ cat files.txt | xargs rm -rf ; rm files.txt Next, test if pip is able to install the package. First, navigate to a different directory, and from there, install the package: $ pip install mlxtend and uninstall it again $ pip uninstall mlxtend","title":"2. Installing the package from local files"},{"location":"CONTRIBUTING/#3-deploying-the-package","text":"Consider deploying the package to the PyPI test server first. The setup instructions can be found here . $ python setup.py sdist bdist_wheel upload -r https://testpypi.python.org/pypi Test if it can be installed from there by executing $ pip install -i https://testpypi.python.org/pypi mlxtend and uninstall it $ pip uninstall mlxtend After this dry run succeeds, repeat this process using the \"real\" PyPI: $ python setup.py sdist bdist_wheel upload","title":"3. Deploying the package"},{"location":"CONTRIBUTING/#4-removing-the-virtual-environment","text":"Finally, to clean up our local drive, remove the virtual testing environment via $ conda remove --name 'mlxtend-testing' --all","title":"4. Removing the virtual environment"},{"location":"CONTRIBUTING/#5-updating-the-conda-forge-recipe","text":"Once a new version of mlxtend has been uploaded to PyPI, update the conda-forge build recipe at https://github.com/conda-forge/mlxtend-feedstock by changing the version number in the recipe/meta.yaml file appropriately.","title":"5.
Updating the conda-forge recipe"},{"location":"USER_GUIDE_INDEX/","text":"User Guide Index classifier Adaline EnsembleVoteClassifier LogisticRegression MultiLayerPerceptron Perceptron SoftmaxRegression StackingClassifier StackingCVClassifier cluster Kmeans data autompg_data boston_housing_data iris_data loadlocal_mnist make_multiplexer_dataset mnist_data three_blobs_data wine_data evaluate bootstrap bootstrap_point632_score BootstrapOutOfBag cochrans_q confusion_matrix combined_ftest_5x2cv feature_importance_permutation ftest lift_score mcnemar_table mcnemar_tables mcnemar paired_ttest_5x2cv paired_ttest_kfold_cv paired_ttest_resampled permutation_test PredefinedHoldoutSplit proportion_difference RandomHoldoutSplit scoring feature_extraction LinearDiscriminantAnalysis PrincipalComponentAnalysis RBFKernelPCA feature_selection ColumnSelector ExhaustiveFeatureSelector SequentialFeatureSelector file_io find_filegroups find_files frequent_patterns apriori association_rules general concepts activation-functions gradient-optimization linear-gradient-derivative regularization-linear image extract_face_landmarks math num_combinations num_permutations plotting category_scatter checkerboard_plot ecdf enrichment_plot plot_confusion_matrix plot_decision_regions plot_learning_curves plot_linear_regression plot_sequential_feature_selection scatterplotmatrix stacked_barplot preprocessing CopyTransformer DenseTransformer MeanCenterer minmax_scaling one-hot_encoding shuffle_arrays_unison standardize TransactionEncoder regressor LinearRegression StackingCVRegressor StackingRegressor text generalize_names generalize_names_duplcheck tokenizer utils Counter","title":"User Guide Index"},{"location":"USER_GUIDE_INDEX/#user-guide-index","text":"","title":"User Guide Index"},{"location":"USER_GUIDE_INDEX/#classifier","text":"Adaline EnsembleVoteClassifier LogisticRegression MultiLayerPerceptron Perceptron SoftmaxRegression StackingClassifier StackingCVClassifier","title":"classifier"},{"location":"USER_GUIDE_INDEX/#cluster","text":"Kmeans","title":"cluster"},{"location":"USER_GUIDE_INDEX/#data","text":"autompg_data boston_housing_data iris_data loadlocal_mnist make_multiplexer_dataset mnist_data three_blobs_data wine_data","title":"data"},{"location":"USER_GUIDE_INDEX/#evaluate","text":"bootstrap bootstrap_point632_score BootstrapOutOfBag cochrans_q confusion_matrix combined_ftest_5x2cv feature_importance_permutation ftest lift_score mcnemar_table mcnemar_tables mcnemar paired_ttest_5x2cv paired_ttest_kfold_cv paired_ttest_resampled permutation_test PredefinedHoldoutSplit proportion_difference RandomHoldoutSplit scoring","title":"evaluate"},{"location":"USER_GUIDE_INDEX/#feature_extraction","text":"LinearDiscriminantAnalysis PrincipalComponentAnalysis RBFKernelPCA","title":"feature_extraction"},{"location":"USER_GUIDE_INDEX/#feature_selection","text":"ColumnSelector ExhaustiveFeatureSelector SequentialFeatureSelector","title":"feature_selection"},{"location":"USER_GUIDE_INDEX/#file_io","text":"find_filegroups find_files","title":"file_io"},{"location":"USER_GUIDE_INDEX/#frequent_patterns","text":"apriori association_rules","title":"frequent_patterns"},{"location":"USER_GUIDE_INDEX/#general-concepts","text":"activation-functions gradient-optimization linear-gradient-derivative regularization-linear","title":"general concepts"},{"location":"USER_GUIDE_INDEX/#image","text":"extract_face_landmarks","title":"image"},{"location":"USER_GUIDE_INDEX/#math","text":"num_combinations 
num_permutations","title":"math"},{"location":"USER_GUIDE_INDEX/#plotting","text":"category_scatter checkerboard_plot ecdf enrichment_plot plot_confusion_matrix plot_decision_regions plot_learning_curves plot_linear_regression plot_sequential_feature_selection scatterplotmatrix stacked_barplot","title":"plotting"},{"location":"USER_GUIDE_INDEX/#preprocessing","text":"CopyTransformer DenseTransformer MeanCenterer minmax_scaling one-hot_encoding shuffle_arrays_unison standardize TransactionEncoder","title":"preprocessing"},{"location":"USER_GUIDE_INDEX/#regressor","text":"LinearRegression StackingCVRegressor StackingRegressor","title":"regressor"},{"location":"USER_GUIDE_INDEX/#text","text":"generalize_names generalize_names_duplcheck tokenizer","title":"text"},{"location":"USER_GUIDE_INDEX/#utils","text":"Counter","title":"utils"},{"location":"cite/","text":"Citing mlxtend If you use mlxtend as part of your workflow in a scientific publication, please consider citing the mlxtend repository with the following DOI: Raschka, Sebastian (2018) MLxtend: Providing machine learning and data science utilities and extensions to Python's scientific computing stack . J Open Source Softw 3(24). @article{raschkas_2018_mlxtend, author = {Sebastian Raschka}, title = {MLxtend: Providing machine learning and data science utilities and extensions to Python\u2019s scientific computing stack}, journal = {The Journal of Open Source Software}, volume = {3}, number = {24}, month = apr, year = 2018, publisher = {The Open Journal}, doi = {10.21105/joss.00638}, url = {http://joss.theoj.org/papers/10.21105/joss.00638} }","title":"Citing Mlxtend"},{"location":"cite/#citing-mlxtend","text":"If you use mlxtend as part of your workflow in a scientific publication, please consider citing the mlxtend repository with the following DOI: Raschka, Sebastian (2018) MLxtend: Providing machine learning and data science utilities and extensions to Python's scientific computing stack . J Open Source Softw 3(24). @article{raschkas_2018_mlxtend, author = {Sebastian Raschka}, title = {MLxtend: Providing machine learning and data science utilities and extensions to Python\u2019s scientific computing stack}, journal = {The Journal of Open Source Software}, volume = {3}, number = {24}, month = apr, year = 2018, publisher = {The Open Journal}, doi = {10.21105/joss.00638}, url = {http://joss.theoj.org/papers/10.21105/joss.00638} }","title":"Citing mlxtend"},{"location":"contributors/","text":"Contributors For the current list of contributors to mlxtend, please see the GitHub contributor page at https://github.com/rasbt/mlxtend/graphs/contributors .","title":"Contributors"},{"location":"contributors/#contributors","text":"For the current list of contributors to mlxtend, please see the GitHub contributor page at https://github.com/rasbt/mlxtend/graphs/contributors .","title":"Contributors"},{"location":"discuss/","text":"Discuss Any questions or comments about mlxtend? Join the mlxtend mailing list on Google Groups!","title":"Discuss"},{"location":"discuss/#discuss","text":"Any questions or comments about mlxtend? 
Join the mlxtend mailing list on Google Groups!","title":"Discuss"},{"location":"installation/","text":"Installing mlxtend PyPI To install mlxtend, just execute pip install mlxtend Alternatively, you can download the package manually from the Python Package Index https://pypi.python.org/pypi/mlxtend , unzip it, navigate into the package, and use the command: python setup.py install Upgrading via pip To upgrade an existing version of mlxtend from PyPI, execute pip install mlxtend --upgrade --no-deps Please note that the dependencies (NumPy and SciPy) will also be upgraded if you omit the --no-deps flag; use the --no-deps (\"no dependencies\") flag if you don't want this. Installing mlxtend from the source distribution In rare cases, users reported problems on certain systems with the default pip installation command, which installs mlxtend from the binary distribution (\"wheels\") on PyPI. If you should encounter similar problems, you could try to install mlxtend from the source distribution instead via pip install --no-binary :all: mlxtend Also, I would appreciate it if you could report any issues that occur when using pip install mlxtend in the hope that we can fix these in future releases. Conda The mlxtend package is also available through conda forge . To install mlxtend using conda, use the following command: conda install mlxtend --channel conda-forge or simply conda install mlxtend if you added conda-forge to your channels ( conda config --add channels conda-forge ). Dev Version The mlxtend version on PyPI may always be one step behind; you can install the latest development version from the GitHub repository by executing pip install git+git://github.com/rasbt/mlxtend.git Or, you can fork the GitHub repository from https://github.com/rasbt/mlxtend and install mlxtend from your local drive via python setup.py install","title":"Installation"},{"location":"installation/#installing-mlxtend","text":"","title":"Installing mlxtend"},{"location":"installation/#pypi","text":"To install mlxtend, just execute pip install mlxtend Alternatively, you can download the package manually from the Python Package Index https://pypi.python.org/pypi/mlxtend , unzip it, navigate into the package, and use the command: python setup.py install","title":"PyPI"},{"location":"installation/#upgrading-via-pip","text":"To upgrade an existing version of mlxtend from PyPI, execute pip install mlxtend --upgrade --no-deps Please note that the dependencies (NumPy and SciPy) will also be upgraded if you omit the --no-deps flag; use the --no-deps (\"no dependencies\") flag if you don't want this.","title":"Upgrading via pip"},{"location":"installation/#installing-mlxtend-from-the-source-distribution","text":"In rare cases, users reported problems on certain systems with the default pip installation command, which installs mlxtend from the binary distribution (\"wheels\") on PyPI. If you should encounter similar problems, you could try to install mlxtend from the source distribution instead via pip install --no-binary :all: mlxtend Also, I would appreciate it if you could report any issues that occur when using pip install mlxtend in the hope that we can fix these in future releases.","title":"Installing mlxtend from the source distribution"},{"location":"installation/#conda","text":"The mlxtend package is also available through conda forge .
To install mlxtend using conda, use the following command: conda install mlxtend --channel conda-forge or simply conda install mlxtend if you added conda-forge to your channels ( conda config --add channels conda-forge ).","title":"Conda"},{"location":"installation/#dev-version","text":"The mlxtend version on PyPI may always be one step behind; you can install the latest development version from the GitHub repository by executing pip install git+git://github.com/rasbt/mlxtend.git Or, you can fork the GitHub repository from https://github.com/rasbt/mlxtend and install mlxtend from your local drive via python setup.py install","title":"Dev Version"},{"location":"license/","text":"This project is released under a permissive new BSD open source license and is commercially usable. There is no warranty; not even for merchantability or fitness for a particular purpose. In addition, you may use, copy, modify, and redistribute all artistic creative works (figures and images) included in this distribution under the directory according to the terms and conditions of the Creative Commons Attribution 4.0 International License. (Computer-generated graphics such as the plots produced by matplotlib fall under the BSD license mentioned above). new BSD License New BSD License Copyright (c) 2014-2018, Sebastian Raschka. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of mlxtend nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Creative Commons Attribution 4.0 International License mlxtend documentation figures are licensed under a Creative Commons Attribution 4.0 International License. http://creativecommons.org/licenses/by-sa/4.0/ . You are free to: Share \u2014 copy and redistribute the material in any medium or format Adapt \u2014 remix, transform, and build upon the material for any purpose, even commercially. The licensor cannot revoke these freedoms as long as you follow the license terms. Under the following terms: Attribution \u2014 You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use.
No additional restrictions \u2014 You may not apply legal terms or technological measures that legally restrict others from doing anything the license permits.","title":"License"},{"location":"license/#new-bsd-license","text":"New BSD License Copyright (c) 2014-2018, Sebastian Raschka. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of mlxtend nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.","title":"new BSD License"},{"location":"license/#creative-commons-attribution-40-international-license","text":"mlxtend documentation figures are licensed under a Creative Commons Attribution 4.0 International License. http://creativecommons.org/licenses/by-sa/4.0/ .","title":"Creative Commons Attribution 4.0 International License"},{"location":"license/#you-are-free-to","text":"Share \u2014 copy and redistribute the material in any medium or format Adapt \u2014 remix, transform, and build upon the material for any purpose, even commercially. The licensor cannot revoke these freedoms as long as you follow the license terms.","title":"You are free to:"},{"location":"license/#under-the-following-terms","text":"Attribution \u2014 You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. No additional restrictions \u2014 You may not apply legal terms or technological measures that legally restrict others from doing anything the license permits.","title":"Under the following terms:"},{"location":"api_modules/mlxtend.classifier/Adaline/","text":"Adaline Adaline(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) ADAptive LInear NEuron classifier. Note that this implementation of Adaline expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization.
If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Adaline/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause","title":"Adaline"},{"location":"api_modules/mlxtend.classifier/Adaline/#adaline","text":"Adaline(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) ADAptive LInear NEuron classifier. Note that this implementation of Adaline expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization.
If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Adaline/","title":"Adaline"},{"location":"api_modules/mlxtend.classifier/Adaline/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_modules/mlxtend.classifier/Adaline/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.classifier/Adaline/#license-bsd-3-clause","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object.
Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.classifier/Adaline/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.classifier/Adaline/#license-bsd-3-clause_1","text":"","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.classifier/EnsembleVoteClassifier/","text":"EnsembleVoteClassifier EnsembleVoteClassifier(clfs, voting='hard', weights=None, verbose=0, refit=True) Soft Voting/Majority Rule classifier for scikit-learn estimators. Parameters clfs : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the VotingClassifier will fit clones of those original classifiers that will be stored in the class attribute self.clfs_ if refit=True (default). voting : str, {'hard', 'soft'} (default='hard') If 'hard', uses predicted class labels for majority rule voting. Else if 'soft', predicts the class label based on the argmax of the sums of the predicted probabilities, which is recommended for an ensemble of well-calibrated classifiers. weights : array-like, shape = [n_classifiers], optional (default= None ) Sequence of weights ( float or int ) to weight the occurrences of predicted class labels ( hard voting) or class probabilities before averaging ( soft voting). Uses uniform weights if None . verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the clf being fitted - verbose=2 : Prints info about the parameters of the clf being fitted - verbose>2 : Changes verbose param of the underlying clf to self.verbose - 2 refit : bool (default: True) Refits classifiers in clfs if True; uses references to the clfs , otherwise (assumes that the classifiers were already fit). Note: refit=False is incompatible with most scikit-learn wrappers! For instance, if any form of cross-validation is performed, this would require re-fitting the classifiers to the training folds, which would raise a NotFittedError if refit=False. (New in mlxtend v0.6.) Attributes classes_ : array-like, shape = [n_predictions] clf : array-like, shape = [n_predictions] The unmodified input classifiers clf_ : array-like, shape = [n_predictions] Fitted clones of the input classifiers Examples >>> import numpy as np >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.ensemble import RandomForestClassifier >>> from mlxtend.sklearn import EnsembleVoteClassifier >>> clf1 = LogisticRegression(random_seed=1) >>> clf2 = RandomForestClassifier(random_seed=1) >>> clf3 = GaussianNB() >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> eclf1 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='hard', verbose=1) >>> eclf1 = eclf1.fit(X, y) >>> print(eclf1.predict(X)) [1 1 1 2 2 2] >>> eclf2 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft') >>> eclf2 = eclf2.fit(X, y) >>> print(eclf2.predict(X)) [1 1 1 2 2 2] >>> eclf3 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ...
voting='soft', weights=[2,1,1]) >>> eclf3 = eclf3.fit(X, y) >>> print(eclf3.predict(X)) [1 1 1 2 2 2] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/EnsembleVoteClassifier/ Methods fit(X, y, sample_weight=None) Learn weight coefficients from training data for each classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each classifier in the clfs list. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict class labels for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns maj : array-like, shape = [n_samples] Predicted class labels. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns avg : array-like, shape = [n_samples, n_classes] Weighted average probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X) Return class labels or probabilities for X for each estimator. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns If voting='soft' : array-like = [n_classifiers, n_samples, n_classes] Class probabilities calculated by each classifier. If voting='hard' : array-like = [n_classifiers, n_samples] Class labels predicted by each classifier.","title":"EnsembleVoteClassifier"},{"location":"api_modules/mlxtend.classifier/EnsembleVoteClassifier/#ensemblevoteclassifier","text":"EnsembleVoteClassifier(clfs, voting='hard', weights=None, verbose=0, refit=True) Soft Voting/Majority Rule classifier for scikit-learn estimators.
Parameters clfs : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the VotingClassifier will fit clones of those original classifiers that will be stored in the class attribute self.clfs_ if refit=True (default). voting : str, {'hard', 'soft'} (default='hard') If 'hard', uses predicted class labels for majority rule voting. Else if 'soft', predicts the class label based on the argmax of the sums of the predicted probalities, which is recommended for an ensemble of well-calibrated classifiers. weights : array-like, shape = [n_classifiers], optional (default= None ) Sequence of weights ( float or int ) to weight the occurances of predicted class labels ( hard voting) or class probabilities before averaging ( soft voting). Uses uniform weights if None . verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the clf being fitted - verbose=2 : Prints info about the parameters of the clf being fitted - verbose>2 : Changes verbose param of the underlying clf to self.verbose - 2 refit : bool (default: True) Refits classifiers in clfs if True; uses references to the clfs , otherwise (assumes that the classifiers were already fit). Note: refit=False is incompatible to mist scikit-learn wrappers! For instance, if any form of cross-validation is performed this would require the re-fitting classifiers to training folds, which would raise a NotFitterError if refit=False. (New in mlxtend v0.6.) Attributes classes_ : array-like, shape = [n_predictions] clf : array-like, shape = [n_predictions] The unmodified input classifiers clf_ : array-like, shape = [n_predictions] Fitted clones of the input classifiers Examples >>> import numpy as np >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.ensemble import RandomForestClassifier >>> from mlxtend.sklearn import EnsembleVoteClassifier >>> clf1 = LogisticRegression(random_seed=1) >>> clf2 = RandomForestClassifier(random_seed=1) >>> clf3 = GaussianNB() >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> eclf1 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='hard', verbose=1) >>> eclf1 = eclf1.fit(X, y) >>> print(eclf1.predict(X)) [1 1 1 2 2 2] >>> eclf2 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft') >>> eclf2 = eclf2.fit(X, y) >>> print(eclf2.predict(X)) [1 1 1 2 2 2] >>> eclf3 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='soft', weights=[2,1,1]) >>> eclf3 = eclf3.fit(X, y) >>> print(eclf3.predict(X)) [1 1 1 2 2 2] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/EnsembleVoteClassifier/","title":"EnsembleVoteClassifier"},{"location":"api_modules/mlxtend.classifier/EnsembleVoteClassifier/#methods","text":"fit(X, y, sample_weight=None) Learn weight coefficients from training data for each classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. 
,{"location":"api_modules/mlxtend.classifier/LogisticRegression/","text":"LogisticRegression LogisticRegression(eta=0.01, epochs=50, l2_lambda=0.0, minibatches=1, random_seed=None, print_progress=0) Logistic regression classifier. Note that this implementation of Logistic Regression expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2_lambda : float Regularization parameter for L2 regularization. No regularization if l2_lambda=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats with the cross-entropy cost (sgd or gd) for every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/LogisticRegression/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. (Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py ; Author: Gael Varoquaux gael.varoquaux@normalesup.org ; License: BSD 3 clause) predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class 1 probability : float score(X, y) Compute the prediction accuracy. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self (Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py ; Author: Gael Varoquaux gael.varoquaux@normalesup.org ; License: BSD 3 clause)","title":"LogisticRegression"}
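Since the Examples entry above only links out to the user guide, here is a minimal usage sketch (an illustration added for this reference, not part of the original docstring; it assumes the iris_data loader from mlxtend.data, documented elsewhere in this index, and uses only constructor arguments documented above):

>>> from mlxtend.data import iris_data
>>> from mlxtend.classifier import LogisticRegression
>>> X, y = iris_data()
>>> X, y = X[y != 2], y[y != 2]  # keep two classes; this implementation expects binary labels in {0, 1}
>>> X = (X - X.mean(axis=0)) / X.std(axis=0)  # standardizing tends to help gradient descent converge
>>> lr = LogisticRegression(eta=0.01, epochs=100, minibatches=1, random_seed=1)  # minibatches=1: batch gradient descent
>>> lr = lr.fit(X, y)
>>> y_pred = lr.predict(X)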
,{"location":"api_modules/mlxtend.classifier/MultiLayerPerceptron/","text":"MultiLayerPerceptron MultiLayerPerceptron(eta=0.5, epochs=50, hidden_layers=[50], n_classes=None, momentum=0.0, l1=0.0, l2=0.0, dropout=1.0, decrease_const=0.0, minibatches=1, random_seed=None, print_progress=0) Multi-layer perceptron classifier with logistic sigmoid activations. Parameters eta : float (default: 0.5) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. hidden_layers : list (default: [50]) Number of units per hidden layer. By default 50 units in the first hidden layer. At the moment, only 1 hidden layer is supported. n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. l1 : float (default: 0.0) L1 regularization strength l2 : float (default: 0.0) L2 regularization strength momentum : float (default: 0.0) Momentum constant. Factor multiplied with the gradient of the previous epoch t-1 to improve learning speed w(t) := w(t) - (grad(t) + momentum * grad(t-1)) decrease_const : float (default: 0.0) Decrease constant. Shrinks the learning rate after each epoch via eta / (1 + epoch*decrease_const) minibatches : int (default: 1) Divide the training data into k minibatches for accelerated stochastic gradient descent learning. Gradient Descent Learning if minibatches = 1 Stochastic Gradient Descent learning if minibatches = len(y) Minibatch learning if minibatches > 1 random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape=[n_features, n_classes] Weights after fitting. b_ : 1D-array, shape=[n_classes] Bias units after fitting. cost_ : list List of floats; the mean categorical cross-entropy cost after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/MultiLayerPerceptron/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. (Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py ; Author: Gael Varoquaux gael.varoquaux@normalesup.org ; License: BSD 3 clause) predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilities : array-like, shape= [n_samples, n_classes] score(X, y) Compute the prediction accuracy. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self (Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py ; Author: Gael Varoquaux gael.varoquaux@normalesup.org ; License: BSD 3 clause)","title":"MultiLayerPerceptron"}
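As with the other classifiers in this module, the docstring defers examples to the user guide; the sketch below is an added illustration under the same assumptions (iris_data from mlxtend.data), using minibatches=len(y) to select the stochastic gradient descent mode described above:

>>> from mlxtend.data import iris_data
>>> from mlxtend.classifier import MultiLayerPerceptron
>>> X, y = iris_data()
>>> X = (X - X.mean(axis=0)) / X.std(axis=0)  # zero-mean, unit-variance features
>>> mlp = MultiLayerPerceptron(eta=0.5, epochs=30, hidden_layers=[10],
...                            minibatches=len(y), random_seed=1)  # len(y) minibatches: SGD
>>> mlp = mlp.fit(X, y)
>>> probas = mlp.predict_proba(X)  # shape [n_samples, n_classes]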
,{"location":"api_modules/mlxtend.classifier/Perceptron/","text":"Perceptron Perceptron(eta=0.1, epochs=50, random_seed=None, print_progress=0) Perceptron classifier. Note that this implementation of the Perceptron expects binary class labels in {0, 1}. Parameters eta : float (default: 0.1) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Number of passes over the training dataset. Prior to each epoch, the dataset is shuffled to prevent cycles. random_seed : int Random state for initializing random weights and shuffling. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Number of misclassifications in every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Perceptron/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. (Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py ; Author: Gael Varoquaux gael.varoquaux@normalesup.org ; License: BSD 3 clause) predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self (Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py ; Author: Gael Varoquaux gael.varoquaux@normalesup.org ; License: BSD 3 clause)","title":"Perceptron"}
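A brief illustrative sketch (added here; it assumes iris_data from mlxtend.data), restricted to two classes because this implementation expects binary labels in {0, 1}:

>>> from mlxtend.data import iris_data
>>> from mlxtend.classifier import Perceptron
>>> X, y = iris_data()
>>> X, y = X[y != 2], y[y != 2]  # binary subset with labels {0, 1}
>>> ppn = Perceptron(eta=0.1, epochs=40, random_seed=1)
>>> ppn = ppn.fit(X, y)
>>> errors_per_epoch = ppn.cost_  # misclassifications in each epoch, as documented above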
,{"location":"api_modules/mlxtend.classifier/SoftmaxRegression/","text":"SoftmaxRegression SoftmaxRegression(eta=0.01, epochs=50, l2=0.0, minibatches=1, n_classes=None, random_seed=None, print_progress=0) Softmax regression classifier. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2 : float Regularization parameter for L2 regularization. No regularization if l2=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats, the average cross-entropy for each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/SoftmaxRegression/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. (Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py ; Author: Gael Varoquaux gael.varoquaux@normalesup.org ; License: BSD 3 clause) predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilities : array-like, shape= [n_samples, n_classes] score(X, y) Compute the prediction accuracy. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self (Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py ; Author: Gael Varoquaux gael.varoquaux@normalesup.org ; License: BSD 3 clause)","title":"SoftmaxRegression"}
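An added illustrative sketch (assumes iris_data from mlxtend.data); unlike the LogisticRegression and Perceptron classes above, SoftmaxRegression handles multiclass targets directly:

>>> from mlxtend.data import iris_data
>>> from mlxtend.classifier import SoftmaxRegression
>>> X, y = iris_data()
>>> X = (X - X.mean(axis=0)) / X.std(axis=0)  # standardize features
>>> smr = SoftmaxRegression(eta=0.01, epochs=200, minibatches=1, random_seed=1)
>>> smr = smr.fit(X, y)
>>> probas = smr.predict_proba(X)  # each row sums to 1 across the classes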
,{"location":"api_modules/mlxtend.classifier/StackingCVClassifier/","text":"StackingCVClassifier StackingCVClassifier(classifiers, meta_classifier, use_probas=False, cv=2, use_features_in_secondary=False, stratify=True, shuffle=True, verbose=0, store_train_meta_features=False, use_clones=True) A 'Stacking Cross-Validation' classifier for scikit-learn estimators. New in mlxtend v0.4.3 Notes The StackingCVClassifier uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus, NumPy's random seed needs to be set explicitly for deterministic behavior, for instance, by calling np.random.seed(RANDOM_SEED) prior to fitting the StackingCVClassifier. Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the StackingCVClassifier will fit clones of these original classifiers that will be stored in the class attribute self.clfs_ . meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains the meta-classifier based on predicted probabilities instead of class labels. cv : int, cross-validation generator or an iterable, optional (default: 2) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 2-fold cross-validation, - integer, to specify the number of folds in a (Stratified)KFold , - An object to be used as a cross-validation generator. - An iterable yielding (train, test) splits. For integer/None inputs, it will use either a KFold or StratifiedKFold cross-validation, depending on the value of the stratify argument. use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. stratify : bool (default: True) If True, and the cv argument is an integer, it will follow a stratified K-Fold cross-validation technique. If the cv argument is a specific cross-validation technique, this argument is ignored. shuffle : bool (default: True) If True, and the cv argument is an integer, the training data will be shuffled at the fitting stage prior to cross-validation. If the cv argument is a specific cross-validation technique, this argument is ignored. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1: Prints the number & name of the classifier being fitted and which fold is currently being used for fitting - verbose=2: Prints info about the parameters of the classifier being fitted - verbose>2: Changes verbose param of the underlying classifier to self.verbose - 2 store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data, which are used for fitting the meta-classifier, are stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingCVClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API but are not compatible with scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features : numpy array, shape = [n_samples, n_classifiers] Meta-features for training data, where n_samples is the number of samples in the training data and n_classifiers is the number of classifiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingCVClassifier/ Methods fit(X, y, groups=None, sample_weight=None) Fit ensemble classifiers and the meta-classifier. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each classifier in the classifiers list as well as to the meta_classifier. Raises an error if some classifier does not support sample_weight in its fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters **fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] Predicted class labels. predict_meta_features(X) Get meta-features of test data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since it requires that each label set be correctly predicted for each sample. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) w.r.t. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"StackingCVClassifier"}
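An added illustrative sketch (assumes iris_data from mlxtend.data and a few standard scikit-learn estimators); note the np.random.seed call, which follows the determinism advice in the Notes section above:

>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.neighbors import KNeighborsClassifier
>>> from mlxtend.data import iris_data
>>> from mlxtend.classifier import StackingCVClassifier
>>> np.random.seed(1)  # check_cv has no seed argument, so fix NumPy's RNG explicitly
>>> X, y = iris_data()
>>> sclf = StackingCVClassifier(classifiers=[KNeighborsClassifier(), GaussianNB()],
...                             meta_classifier=LogisticRegression(), cv=2)
>>> sclf = sclf.fit(X, y)
>>> meta = sclf.predict_meta_features(X)  # shape [n_samples, n_classifiers] since use_probas=False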
cv : int, cross-validation generator or an iterable, optional (default: 2) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 2-fold cross validation, - integer, to specify the number of folds in a (Stratified)KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use either a KFold or StratifiedKFold cross validation depending the value of stratify argument. use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. stratify : bool (default: True) If True, and the cv argument is integer it will follow a stratified K-Fold cross validation technique. If the cv argument is a specific cross validation technique, this argument is omitted. shuffle : bool (default: True) If True, and the cv argument is integer, the training data will be shuffled at fitting stage prior to cross-validation. If the cv argument is a specific cross validation technique, this argument is omitted. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the regressor being fitted and which fold is currently being used for fitting - verbose=2 : Prints info about the parameters of the regressor being fitted - verbose>2 : Changes verbose param of the underlying regressor to self.verbose - 2 store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingCVClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that are supporting the scikit-learn fit/predict API interface but are not compatible to scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classfiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingCVClassifier/","title":"StackingCVClassifier"},{"location":"api_modules/mlxtend.classifier/StackingCVClassifier/#methods","text":"fit(X, y, groups=None, sample_weight=None) Fit ensemble classifers and the meta-classifier. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. 
This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] Predicted class labels. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self","title":"Methods"},{"location":"api_modules/mlxtend.classifier/StackingClassifier/","text":"StackingClassifier StackingClassifier(classifiers, meta_classifier, use_probas=False, average_probas=False, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, use_clones=True) A Stacking classifier for scikit-learn estimators for classification. Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the StackingClassifer will fit clones of these original classifiers that will be stored in the class attribute self.clfs_ . meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains meta-classifier based on predicted probabilities instead of class labels. average_probas : bool (default: False) Averages the probabilities as meta features if True. verbose : int, optional (default=0) Controls the verbosity of the building process. 
- verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the regressor being fitted - verbose=2 : Prints info about the parameters of the regressor being fitted - verbose>2 : Changes verbose param of the underlying regressor to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that are supporting the scikit-learn fit/predict API interface but are not compatible to scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classfiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingClassifier/ Methods fit(X, y, sample_weight=None) Fit ensemble classifers and the meta-classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_outputs] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] or [n_samples, n_outputs] Predicted class labels. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. 
predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] or a list of n_outputs of such arrays if n_outputs > 1. Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"StackingClassifier"},{"location":"api_modules/mlxtend.classifier/StackingClassifier/#stackingclassifier","text":"StackingClassifier(classifiers, meta_classifier, use_probas=False, average_probas=False, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, use_clones=True) A Stacking classifier for scikit-learn estimators for classification. Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the StackingClassifier will fit clones of these original classifiers that will be stored in the class attribute self.clfs_ . meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers. use_probas : bool (default: False) If True, trains the meta-classifier based on predicted probabilities instead of class labels. average_probas : bool (default: False) Averages the probabilities as meta features if True. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the classifier being fitted - verbose=2 : Prints info about the parameters of the classifier being fitted - verbose>2 : Changes verbose param of the underlying classifier to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier are stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function.
Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classifiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingClassifier/","title":"StackingClassifier"},{"location":"api_modules/mlxtend.classifier/StackingClassifier/#methods","text":"fit(X, y, sample_weight=None) Fit ensemble classifiers and the meta-classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_outputs] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each classifier in the classifiers list as well as the meta_classifier. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters **fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] or [n_samples, n_outputs] Predicted class labels. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] or a list of n_outputs of such arrays if n_outputs > 1. Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.
Returns self","title":"Methods"},{"location":"api_modules/mlxtend.cluster/Kmeans/","text":"Kmeans Kmeans(k, max_iter=10, convergence_tolerance=1e-05, random_seed=None, print_progress=0) K-means clustering class. Added in 0.4.1dev Parameters k : int Number of clusters max_iter : int (default: 10) Number of iterations during cluster assignment. Cluster re-assignment stops automatically when the algorithm converged. convergence_tolerance : float (default: 1e-05) Compares current centroids with centroids of the previous iteration using the given tolerance (a small positive float)to determine if the algorithm converged early. random_seed : int (default: None) Set random state for the initial centroid assignment. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Iterations elapsed 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes centroids_ : 2d-array, shape={k, n_features} Feature values of the k cluster centroids. custers_ : dictionary The cluster assignments stored as a Python dictionary; the dictionary keys denote the cluster indeces and the items are Python lists of the sample indices that were assigned to each cluster. iterations_ : int Number of iterations until convergence. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Kmeans/ Methods fit(X, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause","title":"Kmeans"},{"location":"api_modules/mlxtend.cluster/Kmeans/#kmeans","text":"Kmeans(k, max_iter=10, convergence_tolerance=1e-05, random_seed=None, print_progress=0) K-means clustering class. Added in 0.4.1dev Parameters k : int Number of clusters max_iter : int (default: 10) Number of iterations during cluster assignment. Cluster re-assignment stops automatically when the algorithm converged. 
convergence_tolerance : float (default: 1e-05) Compares current centroids with centroids of the previous iteration using the given tolerance (a small positive float) to determine if the algorithm converged early. random_seed : int (default: None) Set random state for the initial centroid assignment. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Iterations elapsed 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes centroids_ : 2d-array, shape={k, n_features} Feature values of the k cluster centroids. clusters_ : dictionary The cluster assignments stored as a Python dictionary; the dictionary keys denote the cluster indices and the items are Python lists of the sample indices that were assigned to each cluster. iterations_ : int Number of iterations until convergence. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Kmeans/","title":"Kmeans"},{"location":"api_modules/mlxtend.cluster/Kmeans/#methods","text":"fit(X, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_modules/mlxtend.cluster/Kmeans/#author-gael-varoquaux","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.cluster/Kmeans/#license-bsd-3-clause","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.data/autompg_data/","text":"autompg_data autompg_data() Auto MPG dataset.
Source : https://archive.ics.uci.edu/ml/datasets/Auto+MPG Number of samples : 392 Continuous target variable : mpg Dataset Attributes: 1) cylinders: multi-valued discrete 2) displacement: continuous 3) horsepower: continuous 4) weight: continuous 5) acceleration: continuous 6) model year: multi-valued discrete 7) origin: multi-valued discrete 8) car name: string (unique for each instance) Returns X, y : [n_samples, n_features], [n_targets] X is the feature matrix with 392 auto samples as rows and 8 feature columns (6 rows with NaNs removed). y is a 1-dimensional array of the target MPG values. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/autompg_data/","title":"Autompg data"},{"location":"api_modules/mlxtend.data/autompg_data/#autompg_data","text":"autompg_data() Auto MPG dataset. Source : https://archive.ics.uci.edu/ml/datasets/Auto+MPG Number of samples : 392 Continuous target variable : mpg Dataset Attributes: 1) cylinders: multi-valued discrete 2) displacement: continuous 3) horsepower: continuous 4) weight: continuous 5) acceleration: continuous 6) model year: multi-valued discrete 7) origin: multi-valued discrete 8) car name: string (unique for each instance) Returns X, y : [n_samples, n_features], [n_targets] X is the feature matrix with 392 auto samples as rows and 8 feature columns (6 rows with NaNs removed). y is a 1-dimensional array of the target MPG values. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/autompg_data/","title":"autompg_data"},{"location":"api_modules/mlxtend.data/boston_housing_data/","text":"boston_housing_data boston_housing_data() Boston Housing dataset. Source : https://archive.ics.uci.edu/ml/datasets/Housing Number of samples : 506 Continuous target variable : MEDV MEDV = Median value of owner-occupied homes in $1000's Dataset Attributes: 1) CRIM per capita crime rate by town 2) ZN proportion of residential land zoned for lots over 25,000 sq.ft. 3) INDUS proportion of non-retail business acres per town 4) CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) 5) NOX nitric oxides concentration (parts per 10 million) 6) RM average number of rooms per dwelling 7) AGE proportion of owner-occupied units built prior to 1940 8) DIS weighted distances to five Boston employment centres 9) RAD index of accessibility to radial highways 10) TAX full-value property-tax rate per $10,000 11) PTRATIO pupil-teacher ratio by town 12) B 1000(Bk - 0.63)^2 where Bk is the prop. of b. by town 13) LSTAT % lower status of the population Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 506 housing samples as rows and 13 feature columns. y is a 1-dimensional array of the continuous target variable MEDV Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/boston_housing_data/","title":"Boston housing data"},{"location":"api_modules/mlxtend.data/boston_housing_data/#boston_housing_data","text":"boston_housing_data() Boston Housing dataset. Source : https://archive.ics.uci.edu/ml/datasets/Housing Number of samples : 506 Continuous target variable : MEDV MEDV = Median value of owner-occupied homes in $1000's Dataset Attributes: 1) CRIM per capita crime rate by town 2) ZN proportion of residential land zoned for lots over 25,000 sq.ft. 
3) INDUS proportion of non-retail business acres per town 4) CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) 5) NOX nitric oxides concentration (parts per 10 million) 6) RM average number of rooms per dwelling 7) AGE proportion of owner-occupied units built prior to 1940 8) DIS weighted distances to five Boston employment centres 9) RAD index of accessibility to radial highways 10) TAX full-value property-tax rate per $10,000 11) PTRATIO pupil-teacher ratio by town 12) B 1000(Bk - 0.63)^2 where Bk is the prop. of b. by town 13) LSTAT % lower status of the population Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 506 housing samples as rows and 13 feature columns. y is a 1-dimensional array of the continuous target variable MEDV Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/boston_housing_data/","title":"boston_housing_data"},{"location":"api_modules/mlxtend.data/iris_data/","text":"iris_data iris_data() Iris flower dataset. Source : https://archive.ics.uci.edu/ml/datasets/Iris Number of samples : 150 Class labels : {0, 1, 2}, distribution: [50, 50, 50] 0 = setosa, 1 = versicolor, 2 = virginica. Dataset Attributes: 1) sepal length [cm] 2) sepal width [cm] 3) petal length [cm] 4) petal width [cm] Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 150 flower samples as rows, and 4 feature columns sepal length, sepal width, petal length, and petal width. y is a 1-dimensional array of the class labels {0, 1, 2} Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/iris_data/","title":"Iris data"},{"location":"api_modules/mlxtend.data/iris_data/#iris_data","text":"iris_data() Iris flower dataset. Source : https://archive.ics.uci.edu/ml/datasets/Iris Number of samples : 150 Class labels : {0, 1, 2}, distribution: [50, 50, 50] 0 = setosa, 1 = versicolor, 2 = virginica. Dataset Attributes: 1) sepal length [cm] 2) sepal width [cm] 3) petal length [cm] 4) petal width [cm] Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 150 flower samples as rows, and 4 feature columns sepal length, sepal width, petal length, and petal width. y is a 1-dimensional array of the class labels {0, 1, 2} Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/iris_data/","title":"iris_data"},{"location":"api_modules/mlxtend.data/loadlocal_mnist/","text":"loadlocal_mnist loadlocal_mnist(images_path, labels_path) Read MNIST from ubyte files. Parameters images_path : str path to the test or train MNIST ubyte file labels_path : str path to the test or train MNIST class labels file Returns images : [n_samples, n_pixels] numpy.array Pixel values of the images. labels : [n_samples] numpy array Target class labels Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/loadlocal_mnist/","title":"Loadlocal mnist"},{"location":"api_modules/mlxtend.data/loadlocal_mnist/#loadlocal_mnist","text":"loadlocal_mnist(images_path, labels_path) Read MNIST from ubyte files. Parameters images_path : str path to the test or train MNIST ubyte file labels_path : str path to the test or train MNIST class labels file Returns images : [n_samples, n_pixels] numpy.array Pixel values of the images. 
labels : [n_samples] numpy array Target class labels Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/loadlocal_mnist/","title":"loadlocal_mnist"},{"location":"api_modules/mlxtend.data/make_multiplexer_dataset/","text":"make_multiplexer_dataset make_multiplexer_dataset(address_bits=2, sample_size=100, positive_class_ratio=0.5, shuffle=False, random_seed=None) Function to create a binary n-bit multiplexer dataset. New in mlxtend v0.9 Parameters address_bits : int (default: 2) A positive integer that determines the number of address bits in the multiplexer, which in turn determines the n-bit capacity of the multiplexer and therefore the number of features. For example, 2 address bits will result in a 6-bit multiplexer and consequently 6 features (2 + 2^2 = 6). If address_bits=3 , then this results in an 11-bit multiplexer (2 + 2^3 = 11) with 11 features. sample_size : int (default: 100) The total number of samples generated. positive_class_ratio : float (default: 0.5) The fraction (a float between 0 and 1) of samples in the dataset (of size sample_size ) that have class label 1. If positive_class_ratio=0.5 (default), then the ratio of class 0 and class 1 samples is perfectly balanced. shuffle : bool (default: False) Whether or not to shuffle the features and labels. If False (default), the samples are returned in sorted order starting with sample_size /2 samples with class label 0 and followed by sample_size /2 samples with class label 1. random_seed : int (default: None) Random seed used for generating the multiplexer samples and shuffling. Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with the number of samples equal to sample_size . The number of features is determined by the number of address bits. For instance, 2 address bits will result in a 6-bit multiplexer and consequently 6 features (2 + 2^2 = 6). All features are binary (values in {0, 1}). y is a 1-dimensional array of class labels in {0, 1}. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/make_multiplexer_dataset","title":"Make multiplexer dataset"},{"location":"api_modules/mlxtend.data/make_multiplexer_dataset/#make_multiplexer_dataset","text":"make_multiplexer_dataset(address_bits=2, sample_size=100, positive_class_ratio=0.5, shuffle=False, random_seed=None) Function to create a binary n-bit multiplexer dataset. New in mlxtend v0.9 Parameters address_bits : int (default: 2) A positive integer that determines the number of address bits in the multiplexer, which in turn determines the n-bit capacity of the multiplexer and therefore the number of features. For example, 2 address bits will result in a 6-bit multiplexer and consequently 6 features (2 + 2^2 = 6). If address_bits=3 , then this results in an 11-bit multiplexer (2 + 2^3 = 11) with 11 features. sample_size : int (default: 100) The total number of samples generated. positive_class_ratio : float (default: 0.5) The fraction (a float between 0 and 1) of samples in the dataset (of size sample_size ) that have class label 1. If positive_class_ratio=0.5 (default), then the ratio of class 0 and class 1 samples is perfectly balanced. shuffle : bool (default: False) Whether or not to shuffle the features and labels.
If False (default), the samples are returned in sorted order starting with sample_size /2 samples with class label 0 and followed by sample_size /2 samples with class label 1. random_seed : int (default: None) Random seed used for generating the multiplexer samples and shuffling. Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with the number of samples equal to sample_size . The number of features is determined by the number of address bits. For instance, 2 address bits will result in a 6-bit multiplexer and consequently 6 features (2 + 2^2 = 6). All features are binary (values in {0, 1}). y is a 1-dimensional array of class labels in {0, 1}. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/make_multiplexer_dataset","title":"make_multiplexer_dataset"},{"location":"api_modules/mlxtend.data/mnist_data/","text":"mnist_data mnist_data() 5000 samples from the MNIST handwritten digits dataset. Data Source : http://yann.lecun.com/exdb/mnist/ Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 5000 image samples as rows; each row consists of 28x28 pixels that were unrolled into 784 pixel feature vectors. y contains the 10 unique class labels 0-9. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/mnist_data/","title":"Mnist data"},{"location":"api_modules/mlxtend.data/mnist_data/#mnist_data","text":"mnist_data() 5000 samples from the MNIST handwritten digits dataset. Data Source : http://yann.lecun.com/exdb/mnist/ Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 5000 image samples as rows; each row consists of 28x28 pixels that were unrolled into 784 pixel feature vectors. y contains the 10 unique class labels 0-9. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/mnist_data/","title":"mnist_data"},{"location":"api_modules/mlxtend.data/three_blobs_data/","text":"three_blobs_data three_blobs_data() A random dataset of 3 2D blobs for clustering. Number of samples : 150 Suggested labels : {0, 1, 2}, distribution: [50, 50, 50] Returns X, y : [n_samples, n_features], [n_cluster_labels] X is the feature matrix with 150 samples as rows and 2 feature columns. y is a 1-dimensional array of the 3 suggested cluster labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/three_blobs_data","title":"Three blobs data"},{"location":"api_modules/mlxtend.data/three_blobs_data/#three_blobs_data","text":"three_blobs_data() A random dataset of 3 2D blobs for clustering. Number of samples : 150 Suggested labels : {0, 1, 2}, distribution: [50, 50, 50] Returns X, y : [n_samples, n_features], [n_cluster_labels] X is the feature matrix with 150 samples as rows and 2 feature columns. y is a 1-dimensional array of the 3 suggested cluster labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/three_blobs_data","title":"three_blobs_data"},{"location":"api_modules/mlxtend.data/wine_data/","text":"wine_data wine_data() Wine dataset.
Source : https://archive.ics.uci.edu/ml/datasets/Wine Number of samples : 178 Class labels : {0, 1, 2}, distribution: [59, 71, 48] Dataset Attributes: 1) Alcohol 2) Malic acid 3) Ash 4) Alcalinity of ash 5) Magnesium 6) Total phenols 7) Flavanoids 8) Nonflavanoid phenols 9) Proanthocyanins 10) Color intensity 11) Hue 12) OD280/OD315 of diluted wines 13) Proline Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 178 wine samples as rows and 13 feature columns. y is a 1-dimensional array of the 3 class labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/wine_data","title":"Wine data"},{"location":"api_modules/mlxtend.data/wine_data/#wine_data","text":"wine_data() Wine dataset. Source : https://archive.ics.uci.edu/ml/datasets/Wine Number of samples : 178 Class labels : {0, 1, 2}, distribution: [59, 71, 48] Dataset Attributes: 1) Alcohol 2) Malic acid 3) Ash 4) Alcalinity of ash 5) Magnesium 6) Total phenols 7) Flavanoids 8) Nonflavanoid phenols 9) Proanthocyanins 10) Color intensity 11) Hue 12) OD280/OD315 of diluted wines 13) Proline Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 178 wine samples as rows and 13 feature columns. y is a 1-dimensional array of the 3 class labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/wine_data","title":"wine_data"},{"location":"api_modules/mlxtend.evaluate/BootstrapOutOfBag/","text":"BootstrapOutOfBag BootstrapOutOfBag(n_splits=200, random_seed=None) Parameters n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. Returns train_idx : ndarray The training set indices for that split. test_idx : ndarray The testing set indices for that split. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/BootstrapOutOfBag/ Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility with scikit-learn. y : object Always ignored, exists for compatibility with scikit-learn. groups : object Always ignored, exists for compatibility with scikit-learn. Returns n_splits : int Returns the number of splitting iterations in the cross-validator. split(X, y=None, groups=None) y : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn. groups : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn.","title":"BootstrapOutOfBag"},{"location":"api_modules/mlxtend.evaluate/BootstrapOutOfBag/#bootstrapoutofbag","text":"BootstrapOutOfBag(n_splits=200, random_seed=None) Parameters n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. Returns train_idx : ndarray The training set indices for that split. test_idx : ndarray The testing set indices for that split. 
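As a brief illustration (a hedged sketch, not part of the original reference; the toy array size is arbitrary), each bootstrap round draws a training set of the same size as the input by sampling with replacement, and the out-of-bag indices form the test set: >>> import numpy as np
>>> from mlxtend.evaluate import BootstrapOutOfBag
>>> oob = BootstrapOutOfBag(n_splits=3, random_seed=1)
>>> for train_idx, test_idx in oob.split(np.arange(5)):
...     print(len(train_idx))  # always 5: sampled with replacement
5
5
5
Because the object implements get_n_splits and split, it can also be passed to scikit-learn utilities that accept a cv argument, e.g. cross_val_score.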
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/BootstrapOutOfBag/","title":"BootstrapOutOfBag"},{"location":"api_modules/mlxtend.evaluate/BootstrapOutOfBag/#methods","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility with scikit-learn. y : object Always ignored, exists for compatibility with scikit-learn. groups : object Always ignored, exists for compatibility with scikit-learn. Returns n_splits : int Returns the number of splitting iterations in the cross-validator. split(X, y=None, groups=None) y : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn. groups : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn.","title":"Methods"},{"location":"api_modules/mlxtend.evaluate/PredefinedHoldoutSplit/","text":"PredefinedHoldoutSplit PredefinedHoldoutSplit(valid_indices) Train/Validation set splitter for sklearn's GridSearchCV etc. Uses user-specified train/validation set indices to split a dataset into train/validation sets using user-defined or random indices. Parameters valid_indices : array-like, shape (num_examples,) Indices of the training examples in the training set to be used for validation. All other indices in the training set are used as the training subset for model fitting. Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.","title":"PredefinedHoldoutSplit"},{"location":"api_modules/mlxtend.evaluate/PredefinedHoldoutSplit/#predefinedholdoutsplit","text":"PredefinedHoldoutSplit(valid_indices) Train/Validation set splitter for sklearn's GridSearchCV etc. Uses user-specified train/validation set indices to split a dataset into train/validation sets using user-defined or random indices. Parameters valid_indices : array-like, shape (num_examples,) Indices of the training examples in the training set to be used for validation. All other indices in the training set are used as the training subset for model fitting.","title":"PredefinedHoldoutSplit"},{"location":"api_modules/mlxtend.evaluate/PredefinedHoldoutSplit/#methods","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility.
Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.","title":"Methods"},{"location":"api_modules/mlxtend.evaluate/RandomHoldoutSplit/","text":"RandomHoldoutSplit RandomHoldoutSplit(valid_size=0.5, random_seed=None, stratify=False) Train/Validation set splitter for sklearn's GridSearchCV etc. Provides train/validation set indices to split a dataset into train/validation sets using random indices. Parameters valid_size : float (default: 0.5) Proportion of examples that are assigned as validation examples. The remaining proportion, 1 - valid_size , will then automatically be assigned as training set examples. random_seed : int (default: None) The random seed for splitting the data into training and validation set partitions. stratify : bool (default: False) True or False, whether to perform a stratified split or not Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of training examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.","title":"RandomHoldoutSplit"},{"location":"api_modules/mlxtend.evaluate/RandomHoldoutSplit/#randomholdoutsplit","text":"RandomHoldoutSplit(valid_size=0.5, random_seed=None, stratify=False) Train/Validation set splitter for sklearn's GridSearchCV etc. Provides train/validation set indices to split a dataset into train/validation sets using random indices. Parameters valid_size : float (default: 0.5) Proportion of examples that are assigned as validation examples. The remaining proportion, 1 - valid_size , will then automatically be assigned as training set examples. random_seed : int (default: None) The random seed for splitting the data into training and validation set partitions. stratify : bool (default: False) True or False, whether to perform a stratified split or not","title":"RandomHoldoutSplit"},{"location":"api_modules/mlxtend.evaluate/RandomHoldoutSplit/#methods","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility.
groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of training examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.","title":"Methods"},{"location":"api_modules/mlxtend.evaluate/bootstrap/","text":"bootstrap bootstrap(x, func, num_rounds=1000, ci=0.95, ddof=1, seed=None) Implements the ordinary nonparametric bootstrap Parameters x : NumPy array, shape=(n_samples, [n_columns]) A one- or multi-dimensional array of data records func : A function which computes a statistic that is used to compute the bootstrap replicates (the statistic computed from the bootstrap samples). This function must return a scalar value. For example, np.mean or np.median would be an acceptable argument for func if x is a 1-dimensional array or vector. num_rounds : int (default=1000) The number of bootstrap samples to draw where each bootstrap sample has the same number of records as the original dataset. ci : float (default=0.95) A float in the range (0, 1) that represents the confidence level for computing the confidence interval. For example, ci=0.95 (default) will compute the 95% confidence interval from the bootstrap replicates. ddof : int The delta degrees of freedom used when computing the standard error. seed : int or None (default=None) Random seed for generating bootstrap samples. Returns original, standard_error, (lower_ci, upper_ci) : tuple Returns the statistic of the original sample ( original ), the standard error of the estimate, and the respective confidence interval bounds. Examples >>> from mlxtend.evaluate import bootstrap >>> rng = np.random.RandomState(123) >>> x = rng.normal(loc=5., size=100) >>> original, std_err, ci_bounds = bootstrap(x, ... num_rounds=1000, ... func=np.mean, ... ci=0.95, ... seed=123) >>> print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, ... std_err, ... ci_bounds[0], ... ci_bounds[1])) Mean: 5.03, SE: +/- 0.11, CI95: [4.80, 5.26] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap/","title":"Bootstrap"},{"location":"api_modules/mlxtend.evaluate/bootstrap/#bootstrap","text":"bootstrap(x, func, num_rounds=1000, ci=0.95, ddof=1, seed=None) Implements the ordinary nonparametric bootstrap Parameters x : NumPy array, shape=(n_samples, [n_columns]) A one- or multi-dimensional array of data records func : A function which computes a statistic that is used to compute the bootstrap replicates (the statistic computed from the bootstrap samples). This function must return a scalar value. For example, np.mean or np.median would be an acceptable argument for func if x is a 1-dimensional array or vector. num_rounds : int (default=1000) The number of bootstrap samples to draw where each bootstrap sample has the same number of records as the original dataset. ci : float (default=0.95) A float in the range (0, 1) that represents the confidence level for computing the confidence interval.
For example, ci=0.95 (default) will compute the 95% confidence interval from the bootstrap replicates. ddof : int The delta degrees of freedom used when computing the standard error. seed : int or None (default=None) Random seed for generating bootstrap samples. Returns original, standard_error, (lower_ci, upper_ci) : tuple Returns the statistic of the original sample ( original ), the standard error of the estimate, and the respective confidence interval bounds. Examples >>> from mlxtend.evaluate import bootstrap >>> rng = np.random.RandomState(123) >>> x = rng.normal(loc=5., size=100) >>> original, std_err, ci_bounds = bootstrap(x, ... num_rounds=1000, ... func=np.mean, ... ci=0.95, ... seed=123) >>> print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, ... std_err, ... ci_bounds[0], ... ci_bounds[1])) Mean: 5.03, SE: +/- 0.11, CI95: [4.80, 5.26] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap/","title":"bootstrap"},{"location":"api_modules/mlxtend.evaluate/bootstrap_point632_score/","text":"bootstrap_point632_score bootstrap_point632_score(estimator, X, y, n_splits=200, method='.632', scoring_func=None, random_seed=None, clone_estimator=True) Implementation of the .632 [1] and .632+ [2] bootstrap for supervised learning References: [1] Efron, Bradley. 1983. \"Estimating the Error Rate of a Prediction Rule: Improvement on Cross-Validation.\" Journal of the American Statistical Association 78 (382): 316. doi:10.2307/2288636. [2] Efron, Bradley, and Robert Tibshirani. 1997. \"Improvements on Cross-Validation: The .632+ Bootstrap Method.\" Journal of the American Statistical Association 92 (438): 548. doi:10.2307/2965703. Parameters estimator : object An estimator for classification or regression that follows the scikit-learn API and implements \"fit\" and \"predict\" methods. X : array-like The data to fit. Can be, for example, a list or an array of at least 2 dimensions. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. method : str (default='.632') The bootstrap method, which can be either - 1) '.632' bootstrap (default) - 2) '.632+' bootstrap - 3) 'oob' (regular out-of-bag, no weighting) for comparison studies. scoring_func : callable Score function (or loss function) with signature scoring_func(y, y_pred, **kwargs) . If None, uses classification accuracy if the estimator is a classifier and mean squared error if the estimator is a regressor. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. clone_estimator : bool (default=True) Clones the estimator if true, otherwise fits the original. Returns scores : array of float, shape=(len(list(n_splits)),) Array of scores of the estimator for each bootstrap replicate.
Examples >>> from sklearn import datasets, linear_model >>> from mlxtend.evaluate import bootstrap_point632_score >>> iris = datasets.load_iris() >>> X = iris.data >>> y = iris.target >>> lr = linear_model.LogisticRegression() >>> scores = bootstrap_point632_score(lr, X, y) >>> acc = np.mean(scores) >>> print('Accuracy:', acc) 0.953023146884 >>> lower = np.percentile(scores, 2.5) >>> upper = np.percentile(scores, 97.5) >>> print('95%% Confidence interval: [%.2f, %.2f]' % (lower, upper)) 95% Confidence interval: [0.90, 0.98] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap_point632_score/","title":"Bootstrap point632 score"},{"location":"api_modules/mlxtend.evaluate/bootstrap_point632_score/#bootstrap_point632_score","text":"bootstrap_point632_score(estimator, X, y, n_splits=200, method='.632', scoring_func=None, random_seed=None, clone_estimator=True) Implementation of the .632 [1] and .632+ [2] bootstrap for supervised learning References: [1] Efron, Bradley. 1983. \"Estimating the Error Rate of a Prediction Rule: Improvement on Cross-Validation.\" Journal of the American Statistical Association 78 (382): 316. doi:10.2307/2288636. [2] Efron, Bradley, and Robert Tibshirani. 1997. \"Improvements on Cross-Validation: The .632+ Bootstrap Method.\" Journal of the American Statistical Association 92 (438): 548. doi:10.2307/2965703. Parameters estimator : object An estimator for classification or regression that follows the scikit-learn API and implements \"fit\" and \"predict\" methods. X : array-like The data to fit. Can be, for example, a list or an array of at least 2 dimensions. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. method : str (default='.632') The bootstrap method, which can be either - 1) '.632' bootstrap (default) - 2) '.632+' bootstrap - 3) 'oob' (regular out-of-bag, no weighting) for comparison studies. scoring_func : callable Score function (or loss function) with signature scoring_func(y, y_pred, **kwargs) . If None, uses classification accuracy if the estimator is a classifier and mean squared error if the estimator is a regressor. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. clone_estimator : bool (default=True) Clones the estimator if true, otherwise fits the original. Returns scores : array of float, shape=(len(list(n_splits)),) Array of scores of the estimator for each bootstrap replicate. Examples >>> from sklearn import datasets, linear_model >>> from mlxtend.evaluate import bootstrap_point632_score >>> iris = datasets.load_iris() >>> X = iris.data >>> y = iris.target >>> lr = linear_model.LogisticRegression() >>> scores = bootstrap_point632_score(lr, X, y) >>> acc = np.mean(scores) >>> print('Accuracy:', acc) 0.953023146884 >>> lower = np.percentile(scores, 2.5) >>> upper = np.percentile(scores, 97.5) >>> print('95%% Confidence interval: [%.2f, %.2f]' % (lower, upper)) 95% Confidence interval: [0.90, 0.98] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap_point632_score/","title":"bootstrap_point632_score"},{"location":"api_modules/mlxtend.evaluate/cochrans_q/","text":"cochrans_q cochrans_q(y_target, *y_model_predictions) Cochran's Q test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array.
*y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array. Returns q, p : float or None, float Returns the Q (chi-squared) value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/cochrans_q/","title":"Cochrans q"},{"location":"api_modules/mlxtend.evaluate/cochrans_q/#cochrans_q","text":"cochrans_q(y_target, *y_model_predictions) Cochran's Q test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array. Returns q, p : float or None, float Returns the Q (chi-squared) value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/cochrans_q/","title":"cochrans_q"},{"location":"api_modules/mlxtend.evaluate/combined_ftest_5x2cv/","text":"combined_ftest_5x2cv combined_ftest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv combined F test proposed by Alpaydin 1999, to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns f : float The F-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/combined_ftest_5x2cv/","title":"Combined ftest 5x2cv"},{"location":"api_modules/mlxtend.evaluate/combined_ftest_5x2cv/#combined_ftest_5x2cv","text":"combined_ftest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv combined F test proposed by Alpaydin 1999, to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors.
If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns f : float The F-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/combined_ftest_5x2cv/","title":"combined_ftest_5x2cv"},{"location":"api_modules/mlxtend.evaluate/confusion_matrix/","text":"confusion_matrix confusion_matrix(y_target, y_predicted, binary=False, positive_label=1) Compute a confusion matrix/contingency table. Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: False) Maps a multi-class problem onto a binary confusion matrix, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns mat : array-like, shape=[n_classes, n_classes] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/confusion_matrix/","title":"Confusion matrix"},{"location":"api_modules/mlxtend.evaluate/confusion_matrix/#confusion_matrix","text":"confusion_matrix(y_target, y_predicted, binary=False, positive_label=1) Compute a confusion matrix/contingency table. Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: False) Maps a multi-class problem onto a binary confusion matrix, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns mat : array-like, shape=[n_classes, n_classes] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/confusion_matrix/","title":"confusion_matrix"},{"location":"api_modules/mlxtend.evaluate/feature_importance_permutation/","text":"feature_importance_permutation feature_importance_permutation(X, y, predict_method, metric, num_rounds=1, seed=None) Feature importance computation via permutation importance Parameters X : NumPy array, shape = [n_samples, n_features] Dataset, where n_samples is the number of samples and n_features is the number of features. y : NumPy array, shape = [n_samples] Target values. predict_method : prediction function A callable function that predicts the target values from X. metric : str, callable The metric for evaluating the feature importance through permutation. By default, the string 'accuracy' is recommended for classifiers and the string 'r2' is recommended for regressors. Optionally, a custom scoring function (e.g., metric=scoring_func ) that accepts two arguments, y_true and y_pred, which have similar shape to the y array. num_rounds : int (default=1) Number of rounds the feature columns are permuted to compute the permutation importance.
seed : int or None (default=None) Random seed for permuting the feature columns. Returns mean_importance_vals, all_importance_vals : NumPy arrays. The first array, mean_importance_vals has shape [n_features, ] and contains the importance values for all features. The shape of the second array is [n_features, num_rounds] and contains the feature importance for each repetition. If num_rounds=1, it contains the same values as the first array, mean_importance_vals. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/feature_importance_permutation/","title":"Feature importance permutation"},{"location":"api_modules/mlxtend.evaluate/feature_importance_permutation/#feature_importance_permutation","text":"feature_importance_permutation(X, y, predict_method, metric, num_rounds=1, seed=None) Feature importance computation via permutation importance Parameters X : NumPy array, shape = [n_samples, n_features] Dataset, where n_samples is the number of samples and n_features is the number of features. y : NumPy array, shape = [n_samples] Target values. predict_method : prediction function A callable function that predicts the target values from X. metric : str, callable The metric for evaluating the feature importance through permutation. By default, the string 'accuracy' is recommended for classifiers and the string 'r2' is recommended for regressors. Optionally, a custom scoring function (e.g., metric=scoring_func ) that accepts two arguments, y_true and y_pred, which have similar shape to the y array. num_rounds : int (default=1) Number of rounds the feature columns are permuted to compute the permutation importance. seed : int or None (default=None) Random seed for permuting the feature columns. Returns mean_importance_vals, all_importance_vals : NumPy arrays. The first array, mean_importance_vals has shape [n_features, ] and contains the importance values for all features. The shape of the second array is [n_features, num_rounds] and contains the feature importance for each repetition. If num_rounds=1, it contains the same values as the first array, mean_importance_vals. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/feature_importance_permutation/","title":"feature_importance_permutation"},{"location":"api_modules/mlxtend.evaluate/ftest/","text":"ftest ftest(y_target, *y_model_predictions) F test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array. Returns f, p : float or None, float Returns the F-value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/ftest/","title":"Ftest"},{"location":"api_modules/mlxtend.evaluate/ftest/#ftest","text":"ftest(y_target, *y_model_predictions) F test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array.
Returns f, p : float or None, float Returns the F-value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/ftest/","title":"ftest"},{"location":"api_modules/mlxtend.evaluate/lift_score/","text":"lift_score lift_score(y_target, y_predicted, binary=True, positive_label=1) Lift measures the degree to which the predictions of a classification model are better than randomly-generated predictions. In terms of True Positives (TP), True Negatives (TN), False Positives (FP), and False Negatives (FN), the lift score is computed as: [ TP / (TP+FP) ] / [ (TP+FN) / (TP+TN+FP+FN) ] Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: True) Maps a multi-class problem onto a binary problem, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns score : float Lift score in the range [0, \infty ] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/lift_score/","title":"Lift score"},{"location":"api_modules/mlxtend.evaluate/lift_score/#lift_score","text":"lift_score(y_target, y_predicted, binary=True, positive_label=1) Lift measures the degree to which the predictions of a classification model are better than randomly-generated predictions. In terms of True Positives (TP), True Negatives (TN), False Positives (FP), and False Negatives (FN), the lift score is computed as: [ TP / (TP+FP) ] / [ (TP+FN) / (TP+TN+FP+FN) ] Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: True) Maps a multi-class problem onto a binary problem, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns score : float Lift score in the range [0, \infty ] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/lift_score/","title":"lift_score"},{"location":"api_modules/mlxtend.evaluate/mcnemar/","text":"mcnemar mcnemar(ary, corrected=True, exact=False) McNemar test for paired nominal data Parameters ary : array-like, shape=[2, 2] 2 x 2 contingency table (as returned by evaluate.mcnemar_table), where a: ary[0, 0]: # of samples that both models predicted correctly b: ary[0, 1]: # of samples that model 1 got right and model 2 got wrong c: ary[1, 0]: # of samples that model 2 got right and model 1 got wrong d: ary[1, 1]: # of samples that both models predicted incorrectly corrected : bool (default: True) Uses Edward's continuity correction for chi-squared if True exact : bool, (default: False) If True , uses an exact binomial test comparing b to a binomial distribution with n = b + c and p = 0.5. It is highly recommended to use exact=True for sample sizes < 25 since the chi-squared statistic is not well-approximated by the chi-squared distribution!
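Editor's aside (not part of the generated search index): a minimal sketch of lift_score as documented above, with made-up label arrays.

```python
import numpy as np
from mlxtend.evaluate import lift_score

y_target = np.array([0, 0, 1, 0, 0, 1, 1, 1, 1, 1])
y_predicted = np.array([1, 0, 1, 0, 0, 0, 0, 1, 1, 1])

# [TP/(TP+FP)] / [(TP+FN)/(TP+TN+FP+FN)], per the formula above
print(lift_score(y_target, y_predicted))
```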
Returns chi2, p : float or None, float Returns the chi-squared value and the p-value; if exact=True (default: False ), chi2 is None Examples For usage examples, please see [http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/](http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/)","title":"Mcnemar"},{"location":"api_modules/mlxtend.evaluate/mcnemar/#mcnemar","text":"mcnemar(ary, corrected=True, exact=False) McNemar test for paired nominal data Parameters ary : array-like, shape=[2, 2] 2 x 2 contingency table (as returned by evaluate.mcnemar_table), where a: ary[0, 0]: # of samples that both models predicted correctly b: ary[0, 1]: # of samples that model 1 got right and model 2 got wrong c: ary[1, 0]: # of samples that model 2 got right and model 1 got wrong d: ary[1, 1]: # of samples that both models predicted incorrectly corrected : bool (default: True) Uses Edward's continuity correction for chi-squared if True exact : bool, (default: False) If True , uses an exact binomial test comparing b to a binomial distribution with n = b + c and p = 0.5. It is highly recommended to use exact=True for sample sizes < 25 since the chi-squared statistic is not well-approximated by the chi-squared distribution! Returns chi2, p : float or None, float Returns the chi-squared value and the p-value; if exact=True (default: False ), chi2 is None Examples For usage examples, please see [http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/](http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/)","title":"mcnemar"},{"location":"api_modules/mlxtend.evaluate/mcnemar_table/","text":"mcnemar_table mcnemar_table(y_target, y_model1, y_model2) Compute a 2x2 contingency table for McNemar's test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. y_model1 : array-like, shape=[n_samples] Predicted class labels from model 1 as 1D NumPy array. y_model2 : array-like, shape=[n_samples] Predicted class labels from model 2 as 1D NumPy array. Returns tb : array-like, shape=[2, 2] 2x2 contingency table with the following contents: a: tb[0, 0]: # of samples that both models predicted correctly b: tb[0, 1]: # of samples that model 1 got right and model 2 got wrong c: tb[1, 0]: # of samples that model 2 got right and model 1 got wrong d: tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_table/","title":"Mcnemar table"},{"location":"api_modules/mlxtend.evaluate/mcnemar_table/#mcnemar_table","text":"mcnemar_table(y_target, y_model1, y_model2) Compute a 2x2 contingency table for McNemar's test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. y_model1 : array-like, shape=[n_samples] Predicted class labels from model 1 as 1D NumPy array. y_model2 : array-like, shape=[n_samples] Predicted class labels from model 2 as 1D NumPy array.
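Editor's aside (not part of the generated search index): a minimal sketch combining mcnemar_table and mcnemar as documented above; the label arrays are illustrative.

```python
import numpy as np
from mlxtend.evaluate import mcnemar_table, mcnemar

y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
y_mod1 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0])
y_mod2 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0])

# Build the 2x2 contingency table, then run the corrected chi-squared test
tb = mcnemar_table(y_target=y_true, y_model1=y_mod1, y_model2=y_mod2)
chi2, p = mcnemar(ary=tb, corrected=True)
print(chi2, p)
```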
Returns tb : array-like, shape=[2, 2] 2x2 contingency table with the following contents: a: tb[0, 0]: # of samples that both models predicted correctly b: tb[0, 1]: # of samples that model 1 got right and model 2 got wrong c: tb[1, 0]: # of samples that model 2 got right and model 1 got wrong d: tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_table/","title":"mcnemar_table"},{"location":"api_modules/mlxtend.evaluate/mcnemar_tables/","text":"mcnemar_tables mcnemar_tables(y_target, *y_model_predictions) Compute multiple 2x2 contingency tables for McNemar's test or Cochran's Q test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-like, shape=[n_samples] Predicted class labels for a model. Returns tables : dict Dictionary of NumPy arrays with shape=[2, 2]. Each dictionary key names the two models to be compared based on the order the models were passed as *y_model_predictions . The number of dictionary entries is equal to the number of pairwise combinations between the m models, i.e., \"m choose 2.\" For example, the following target array (containing the true labels) and 3 models y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 1, 1, 1, 0, 1, 0, 0, 0, 0]) would result in the following dictionary: {'model_0 vs model_1': array([[ 4., 1.], [ 2., 3.]]), 'model_0 vs model_2': array([[ 3., 0.], [ 3., 4.]]), 'model_1 vs model_2': array([[ 3., 0.], [ 2., 5.]])} Each array is structured in the following way: tb[0, 0]: # of samples that both models predicted correctly tb[0, 1]: # of samples that model a got right and model b got wrong tb[1, 0]: # of samples that model b got right and model a got wrong tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see [http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/](http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/)","title":"Mcnemar tables"},{"location":"api_modules/mlxtend.evaluate/mcnemar_tables/#mcnemar_tables","text":"mcnemar_tables(y_target, *y_model_predictions) Compute multiple 2x2 contingency tables for McNemar's test or Cochran's Q test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-like, shape=[n_samples] Predicted class labels for a model. Returns tables : dict Dictionary of NumPy arrays with shape=[2, 2]. Each dictionary key names the two models to be compared based on the order the models were passed as *y_model_predictions .
The number of dictionary entries is equal to the number of pairwise combinations between the m models, i.e., \"m choose 2.\" For example, the following target array (containing the true labels) and 3 models y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 1, 1, 1, 0, 1, 0, 0, 0, 0]) would result in the following dictionary: {'model_0 vs model_1': array([[ 4., 1.], [ 2., 3.]]), 'model_0 vs model_2': array([[ 3., 0.], [ 3., 4.]]), 'model_1 vs model_2': array([[ 3., 0.], [ 2., 5.]])} Each array is structured in the following way: tb[0, 0]: # of samples that both models predicted correctly tb[0, 1]: # of samples that model a got right and model b got wrong tb[1, 0]: # of samples that model b got right and model a got wrong tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see [http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/](http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/)","title":"mcnemar_tables"},{"location":"api_modules/mlxtend.evaluate/paired_ttest_5x2cv/","text":"paired_ttest_5x2cv paired_ttest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv paired t test proposed by Dietterich (1998) to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_5x2cv/","title":"Paired ttest 5x2cv"},{"location":"api_modules/mlxtend.evaluate/paired_ttest_5x2cv/#paired_ttest_5x2cv","text":"paired_ttest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv paired t test proposed by Dietterich (1998) to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors.
If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_5x2cv/","title":"paired_ttest_5x2cv"},{"location":"api_modules/mlxtend.evaluate/paired_ttest_kfold_cv/","text":"paired_ttest_kfold_cv paired_ttest_kfold_cv(estimator1, estimator2, X, y, cv=10, scoring=None, shuffle=False, random_seed=None) Implements the k-fold paired t test procedure to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. cv : int (default: 10) Number of splits and iterations for the cross-validation procedure scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. shuffle : bool (default: False) Whether to shuffle the dataset for generating the k-fold splits. random_seed : int or None (default: None) Random seed for shuffling the dataset for generating the k-fold splits. Ignored if shuffle=False. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_kfold_cv/","title":"Paired ttest kfold cv"},{"location":"api_modules/mlxtend.evaluate/paired_ttest_kfold_cv/#paired_ttest_kfold_cv","text":"paired_ttest_kfold_cv(estimator1, estimator2, X, y, cv=10, scoring=None, shuffle=False, random_seed=None) Implements the k-fold paired t test procedure to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values.
cv : int (default: 10) Number of splits and iterations for the cross-validation procedure scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. shuffle : bool (default: False) Whether to shuffle the dataset for generating the k-fold splits. random_seed : int or None (default: None) Random seed for shuffling the dataset for generating the k-fold splits. Ignored if shuffle=False. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_kfold_cv/","title":"paired_ttest_kfold_cv"},{"location":"api_modules/mlxtend.evaluate/paired_ttest_resampled/","text":"paired_ttest_resampled paired_ttest_resampled(estimator1, estimator2, X, y, num_rounds=30, test_size=0.3, scoring=None, random_seed=None) Implements the resampled paired t test procedure to compare the performance of two models (also called k-hold-out paired t test). Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. num_rounds : int (default: 30) Number of resampling iterations (i.e., train/test splits) test_size : float or int (default: 0.3) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to use as a test set. If int, represents the absolute number of test examples. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models.
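Editor's aside (not part of the generated search index): a minimal sketch of paired_ttest_resampled as documented above; the Iris data and the two estimators are illustrative assumptions.

```python
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from mlxtend.evaluate import paired_ttest_resampled

X, y = load_iris(return_X_y=True)

# 30 random train/test resamplings by default
t, p = paired_ttest_resampled(estimator1=LogisticRegression(),
                              estimator2=DecisionTreeClassifier(),
                              X=X, y=y, random_seed=1)
print(t, p)
```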
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_resampled/","title":"Paired ttest resampled"},{"location":"api_modules/mlxtend.evaluate/paired_ttest_resampled/#paired_ttest_resampled","text":"paired_ttest_resampled(estimator1, estimator2, X, y, num_rounds=30, test_size=0.3, scoring=None, random_seed=None) Implements the resampled paired t test procedure to compare the performance of two models (also called k-hold-out paired t test). Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. num_rounds : int (default: 30) Number of resampling iterations (i.e., train/test splits) test_size : float or int (default: 0.3) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to use as a test set. If int, represents the absolute number of test examples. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_resampled/","title":"paired_ttest_resampled"},{"location":"api_modules/mlxtend.evaluate/permutation_test/","text":"permutation_test permutation_test(x, y, func='x_mean != y_mean', method='exact', num_rounds=1000, seed=None) Nonparametric permutation test Parameters x : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the first sample (e.g., the treatment group). y : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the second sample (e.g., the control group). func : custom function or str (default: 'x_mean != y_mean') Function to compute the statistic for the permutation test. - If 'x_mean != y_mean', uses func=lambda x, y: np.abs(np.mean(x) - np.mean(y)) for a two-sided test. - If 'x_mean > y_mean', uses func=lambda x, y: np.mean(x) - np.mean(y) for a one-sided test. - If 'x_mean < y_mean', uses func=lambda x, y: np.mean(y) - np.mean(x) for a one-sided test. method : 'approximate' or 'exact' (default: 'exact') If 'exact' (default), all possible permutations are considered. If 'approximate' the number of drawn samples is given by num_rounds . Note that 'exact' is typically not feasible unless the dataset size is relatively small. num_rounds : int (default: 1000) The number of permutation samples if method='approximate' .
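Editor's aside (not part of the generated search index): a minimal sketch of permutation_test as documented above; the two samples are made-up measurements.

```python
from mlxtend.evaluate import permutation_test

# Illustrative treatment/control samples
treatment = [28.44, 29.32, 31.22, 29.58, 30.34, 28.76, 29.21]
control = [33.51, 30.63, 32.38, 32.52, 29.41, 30.93, 49.78]

# Monte Carlo approximation of the two-sided test with the default statistic
p = permutation_test(treatment, control,
                     method='approximate', num_rounds=10000, seed=0)
print(p)
```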
seed : int or None (default: None) The random seed for generating permutation samples if method='approximate' . Returns p-value under the null hypothesis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/permutation_test/","title":"Permutation test"},{"location":"api_modules/mlxtend.evaluate/permutation_test/#permutation_test","text":"permutation_test(x, y, func='x_mean != y_mean', method='exact', num_rounds=1000, seed=None) Nonparametric permutation test Parameters x : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the first sample (e.g., the treatment group). y : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the second sample (e.g., the control group). func : custom function or str (default: 'x_mean != y_mean') Function to compute the statistic for the permutation test. - If 'x_mean != y_mean', uses func=lambda x, y: np.abs(np.mean(x) - np.mean(y)) for a two-sided test. - If 'x_mean > y_mean', uses func=lambda x, y: np.mean(x) - np.mean(y) for a one-sided test. - If 'x_mean < y_mean', uses func=lambda x, y: np.mean(y) - np.mean(x) for a one-sided test. method : 'approximate' or 'exact' (default: 'exact') If 'exact' (default), all possible permutations are considered. If 'approximate' the number of drawn samples is given by num_rounds . Note that 'exact' is typically not feasible unless the dataset size is relatively small. num_rounds : int (default: 1000) The number of permutation samples if method='approximate' . seed : int or None (default: None) The random seed for generating permutation samples if method='approximate' . Returns p-value under the null hypothesis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/permutation_test/","title":"permutation_test"},{"location":"api_modules/mlxtend.evaluate/proportion_difference/","text":"proportion_difference proportion_difference(proportion_1, proportion_2, n_1, n_2=None) Computes the test statistic and p-value for a difference of proportions test. Parameters proportion_1 : float The first proportion proportion_2 : float The second proportion n_1 : int The sample size of the first test sample n_2 : int or None (default=None) The sample size of the second test sample. If None , n_1 = n_2 . Returns z, p : float or None, float Returns the z-score and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/proportion_difference/","title":"Proportion difference"},{"location":"api_modules/mlxtend.evaluate/proportion_difference/#proportion_difference","text":"proportion_difference(proportion_1, proportion_2, n_1, n_2=None) Computes the test statistic and p-value for a difference of proportions test. Parameters proportion_1 : float The first proportion proportion_2 : float The second proportion n_1 : int The sample size of the first test sample n_2 : int or None (default=None) The sample size of the second test sample. If None , n_1 = n_2 . Returns z, p : float or None, float Returns the z-score and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/proportion_difference/","title":"proportion_difference"},{"location":"api_modules/mlxtend.evaluate/scoring/","text":"scoring scoring(y_target, y_predicted, metric='error', positive_label=1, unique_labels='auto') Compute a scoring metric for supervised learning. Parameters y_target : array-like, shape=[n_values] True class labels or target values.
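Editor's aside (not part of the generated search index): a minimal sketch of proportion_difference as documented above; the two accuracies and the sample size are hypothetical numbers.

```python
from mlxtend.evaluate import proportion_difference

# Hypothetical accuracies of two models, each evaluated on 500 test samples
z, p = proportion_difference(0.86, 0.82, n_1=500)
print(z, p)
```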
y_predicted : array-like, shape=[n_values] Predicted class labels or target values. metric : str (default: 'error') Performance metric: 'accuracy': (TP + TN)/(FP + FN + TP + TN) = 1-ERR 'per-class accuracy': Average per-class accuracy 'per-class error': Average per-class error 'error': (FP + FN)/(FP + FN + TP + TN) = 1-ACC 'false_positive_rate': FP/N = FP/(FP + TN) 'true_positive_rate': TP/P = TP/(FN + TP) 'true_negative_rate': TN/N = TN/(FP + TN) 'precision': TP/(TP + FP) 'recall': equal to 'true_positive_rate' 'sensitivity': equal to 'true_positive_rate' or 'recall' 'specificity': equal to 'true_negative_rate' 'f1': 2 * (PRE * REC)/(PRE + REC) 'matthews_corr_coef': (TP*TN - FP*FN) / sqrt((TP + FP)(TP + FN)(TN + FP)(TN + FN)) Where: [TP: True positives, TN: True negatives, FP: False positives, FN: False negatives] positive_label : int (default: 1) Label of the positive class for binary classification metrics. unique_labels : str or array-like (default: 'auto') If 'auto', deduces the unique class labels from y_target Returns score : float Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/scoring/","title":"Scoring"},{"location":"api_modules/mlxtend.evaluate/scoring/#scoring","text":"scoring(y_target, y_predicted, metric='error', positive_label=1, unique_labels='auto') Compute a scoring metric for supervised learning. Parameters y_target : array-like, shape=[n_values] True class labels or target values. y_predicted : array-like, shape=[n_values] Predicted class labels or target values. metric : str (default: 'error') Performance metric: 'accuracy': (TP + TN)/(FP + FN + TP + TN) = 1-ERR 'per-class accuracy': Average per-class accuracy 'per-class error': Average per-class error 'error': (FP + FN)/(FP + FN + TP + TN) = 1-ACC 'false_positive_rate': FP/N = FP/(FP + TN) 'true_positive_rate': TP/P = TP/(FN + TP) 'true_negative_rate': TN/N = TN/(FP + TN) 'precision': TP/(TP + FP) 'recall': equal to 'true_positive_rate' 'sensitivity': equal to 'true_positive_rate' or 'recall' 'specificity': equal to 'true_negative_rate' 'f1': 2 * (PRE * REC)/(PRE + REC) 'matthews_corr_coef': (TP*TN - FP*FN) / sqrt((TP + FP)(TP + FN)(TN + FP)(TN + FN)) Where: [TP: True positives, TN: True negatives, FP: False positives, FN: False negatives] positive_label : int (default: 1) Label of the positive class for binary classification metrics. unique_labels : str or array-like (default: 'auto') If 'auto', deduces the unique class labels from y_target Returns score : float Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/scoring/","title":"scoring"},{"location":"api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/","text":"LinearDiscriminantAnalysis LinearDiscriminantAnalysis(n_discriminants=None) Linear Discriminant Analysis Class Parameters n_discriminants : int (default: None) The number of discriminants for transformation. Keeps the original dimensions of the dataset if None . Attributes w_ : array-like, shape=[n_features, n_discriminants] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/LinearDiscriminantAnalysis/ Methods fit(X, y, n_classes=None) Fit the LDA model with X.
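Editor's aside (not part of the generated search index): a minimal sketch of the scoring function as documented above; labels are illustrative.

```python
import numpy as np
from mlxtend.evaluate import scoring

y_true = np.array([0, 0, 1, 1, 0, 1])
y_pred = np.array([0, 1, 1, 1, 0, 0])

# f1 = 2*(PRE*REC)/(PRE+REC), per the metric list above
print(scoring(y_target=y_true, y_predicted=y_pred, metric='f1'))
```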
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_discriminants] Projected training vectors.","title":"LinearDiscriminantAnalysis"},{"location":"api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/#lineardiscriminantanalysis","text":"LinearDiscriminantAnalysis(n_discriminants=None) Linear Discriminant Analysis Class Parameters n_discriminants : int (default: None) The number of discriminants for transformation. Keeps the original dimensions of the dataset if None . Attributes w_ : array-like, shape=[n_features, n_discriminants] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/LinearDiscriminantAnalysis/","title":"LinearDiscriminantAnalysis"},{"location":"api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/#methods","text":"fit(X, y, n_classes=None) Fit the LDA model with X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.
adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/#license-bsd-3-clause","text":"set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/#license-bsd-3-clause_1","text":"transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_discriminants] Projected training vectors.","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.feature_extraction/PrincipalComponentAnalysis/","text":"PrincipalComponentAnalysis PrincipalComponentAnalysis(n_components=None, solver='eigen') Principal Component Analysis Class Parameters n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . solver : str (default: 'eigen') Method for performing the matrix decomposition. {'eigen', 'svd'} Attributes w_ : array-like, shape=[n_features, n_components] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. loadings_ : array_like, shape=[n_features, n_features] The factor loadings of the original variables onto the principal components. The columns are the principal components, and the rows are the features loadings. For instance, the first column contains the loadings onto the first principal component. Note that the signs may be flipped depending on whether you use the 'eigen' or 'svd' solver; this does not affect the interpretation of the loadings though. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/PrincipalComponentAnalysis/ Methods fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. 
Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"PrincipalComponentAnalysis"},{"location":"api_modules/mlxtend.feature_extraction/PrincipalComponentAnalysis/#principalcomponentanalysis","text":"PrincipalComponentAnalysis(n_components=None, solver='eigen') Principal Component Analysis Class Parameters n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . solver : str (default: 'eigen') Method for performing the matrix decomposition. {'eigen', 'svd'} Attributes w_ : array-like, shape=[n_features, n_components] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. loadings_ : array_like, shape=[n_features, n_features] The factor loadings of the original variables onto the principal components. The columns are the principal components, and the rows are the features loadings. For instance, the first column contains the loadings onto the first principal component. Note that the signs may be flipped depending on whether you use the 'eigen' or 'svd' solver; this does not affect the interpretation of the loadings though. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/PrincipalComponentAnalysis/","title":"PrincipalComponentAnalysis"},{"location":"api_modules/mlxtend.feature_extraction/PrincipalComponentAnalysis/#methods","text":"fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' 
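Editor's aside (not part of the generated search index): a minimal sketch of the PrincipalComponentAnalysis fit/transform flow documented above; the Iris data is an illustrative choice.

```python
from mlxtend.data import iris_data
from mlxtend.feature_extraction import PrincipalComponentAnalysis

X, y = iris_data()

# Project the 4 Iris features onto the first two principal components
pca = PrincipalComponentAnalysis(n_components=2)
X_pca = pca.fit(X).transform(X)
print(X_pca.shape)  # (150, 2)
```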
adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_modules/mlxtend.feature_extraction/PrincipalComponentAnalysis/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.feature_extraction/PrincipalComponentAnalysis/#license-bsd-3-clause","text":"set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.feature_extraction/PrincipalComponentAnalysis/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.feature_extraction/PrincipalComponentAnalysis/#license-bsd-3-clause_1","text":"transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.feature_extraction/RBFKernelPCA/","text":"RBFKernelPCA RBFKernelPCA(gamma=15.0, n_components=None, copy_X=True) RBF Kernel Principal Component Analysis for dimensionality reduction. Parameters gamma : float (default: 15.0) Free parameter (coefficient) of the RBF kernel. n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . copy_X : bool (default: True) Copies training data, which is required to compute the projection of new data via the transform method. Uses a reference to X if False. Attributes e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. X_projected_ : array-like, shape=[n_samples, n_components] Training samples projected along the component axes. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/RBFKernelPCA/ Methods fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause set_params( params) Set the parameters of this estimator. 
The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause transform(X) Apply the non-linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"RBFKernelPCA"},{"location":"api_modules/mlxtend.feature_extraction/RBFKernelPCA/#rbfkernelpca","text":"RBFKernelPCA(gamma=15.0, n_components=None, copy_X=True) RBF Kernel Principal Component Analysis for dimensionality reduction. Parameters gamma : float (default: 15.0) Free parameter (coefficient) of the RBF kernel. n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . copy_X : bool (default: True) Copies training data, which is required to compute the projection of new data via the transform method. Uses a reference to X if False. Attributes e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. X_projected_ : array-like, shape=[n_samples, n_components] Training samples projected along the component axes. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/RBFKernelPCA/","title":"RBFKernelPCA"},{"location":"api_modules/mlxtend.feature_extraction/RBFKernelPCA/#methods","text":"fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_modules/mlxtend.feature_extraction/RBFKernelPCA/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.feature_extraction/RBFKernelPCA/#license-bsd-3-clause","text":"set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. 
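Editor's aside (not part of the generated search index): a minimal sketch of RBFKernelPCA as documented above; the two-moons data is an illustrative assumption, chosen because kernel PCA is typically used on nonlinearly separable data.

```python
from sklearn.datasets import make_moons
from mlxtend.feature_extraction import RBFKernelPCA

X, y = make_moons(n_samples=100, random_state=0)

kpca = RBFKernelPCA(gamma=15.0, n_components=2)
kpca.fit(X)
print(kpca.X_projected_.shape)  # (100, 2)
```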
Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.feature_extraction/RBFKernelPCA/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.feature_extraction/RBFKernelPCA/#license-bsd-3-clause_1","text":"transform(X) Apply the non-linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.feature_selection/ColumnSelector/","text":"ColumnSelector ColumnSelector(cols=None, drop_axis=False) Object for selecting specific columns from a data set. Parameters cols : array-like (default: None) A list specifying the feature indices to be selected. For example, [1, 4, 5] to select the 2nd, 5th, and 6th feature columns. If None, returns all columns in the array. drop_axis : bool (default=False) Drops the last axis if True and only one column is selected. This is useful, e.g., when the ColumnSelector is used for selecting only one column and the resulting array should be fed to e.g., a scikit-learn column selector. E.g., instead of returning an array with shape (n_samples, 1), drop_axis=True will return an array with shape (n_samples,). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ColumnSelector/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features.
y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features","title":"ColumnSelector"},{"location":"api_modules/mlxtend.feature_selection/ColumnSelector/#columnselector","text":"ColumnSelector(cols=None, drop_axis=False) Object for selecting specific columns from a data set. Parameters cols : array-like (default: None) A list specifying the feature indices to be selected. For example, [1, 4, 5] to select the 2nd, 5th, and 6th feature columns. If None, returns all columns in the array. drop_axis : bool (default=False) Drops the last axis if True and only one column is selected. This is useful, e.g., when the ColumnSelector is used for selecting only one column and the resulting array should be fed to e.g., a scikit-learn column selector. E.g., instead of returning an array with shape (n_samples, 1), drop_axis=True will return an array with shape (n_samples,). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ColumnSelector/","title":"ColumnSelector"},{"location":"api_modules/mlxtend.feature_selection/ColumnSelector/#methods","text":"fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features","title":"Methods"},{"location":"api_modules/mlxtend.feature_selection/ExhaustiveFeatureSelector/","text":"ExhaustiveFeatureSelector ExhaustiveFeatureSelector(estimator, min_features=1, max_features=1, print_progress=True, scoring='accuracy', cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Exhaustive Feature Selection for Classification and Regression. (new in v0.4.3) Parameters estimator : scikit-learn classifier or regressor min_features : int (default: 1) Minimum number of features to select max_features : int (default: 1) Maximum number of features to select print_progress : bool (default: True) Prints progress as the number of epochs to stderr.
scoring : str, (default='accuracy') Scoring metric in {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} for regressors, or a callable object or function with signature scorer(estimator, X, y) . cv : int (default: 5) Scikit-learn cross-validation generator or int . If estimator is a classifier (or y consists of integer class labels), stratified k-fold is performed, and regular k-fold cross-validation otherwise. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1. Attributes best_idx_ : array-like, shape = [n_predictions] Feature Indices of the selected feature subsets. best_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representation of the feature array indices. New in v 0.13.0. best_score_ : float Cross validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the exhaustive selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representation of the feature array indices. The 'feature_names' is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ExhaustiveFeatureSelector/ Methods fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of classifier.
Returns self : object fit_transform(X, y, **fit_params) Fit to training data and return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. fit_params : dict of string -> object, optional Parameters to pass to the fit method of classifier. Returns Feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': average of CV scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Feature subset of X, shape={n_samples, k_features}","title":"ExhaustiveFeatureSelector"},{"location":"api_modules/mlxtend.feature_selection/ExhaustiveFeatureSelector/#exhaustivefeatureselector","text":"ExhaustiveFeatureSelector(estimator, min_features=1, max_features=1, print_progress=True, scoring='accuracy', cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Exhaustive Feature Selection for Classification and Regression. (new in v0.4.3) Parameters estimator : scikit-learn classifier or regressor min_features : int (default: 1) Minimum number of features to select max_features : int (default: 1) Maximum number of features to select print_progress : bool (default: True) Prints progress as the number of epochs to stderr. scoring : str, (default='accuracy') Scoring metric in {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} for regressors, or a callable object or function with signature scorer(estimator, X, y) . cv : int (default: 5) Scikit-learn cross-validation generator or int . If estimator is a classifier (or y consists of integer class labels), stratified k-fold is performed, and regular k-fold cross-validation otherwise. No cross-validation if cv is None, False, or 0.
n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs to avoid delays due to on-demand spawning of the jobs. An int, giving the exact number of total jobs that are spawned. A string, giving an expression as a function of n_jobs, as in 2*n_jobs. clone_estimator : bool (default: True) Clones the estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0 and n_jobs=1. Attributes best_idx_ : array-like, shape = [n_predictions] Feature indices of the selected feature subsets. best_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. New in v 0.13.0. best_score_ : float Cross-validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the exhaustive selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. The 'feature_names' entry is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ExhaustiveFeatureSelector/","title":"ExhaustiveFeatureSelector"},{"location":"api_modules/mlxtend.feature_selection/ExhaustiveFeatureSelector/#methods","text":"fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data and return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values.
fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns Feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': average of the CV scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Feature subset of X, shape={n_samples, k_features}","title":"Methods"},{"location":"api_modules/mlxtend.feature_selection/SequentialFeatureSelector/","text":"SequentialFeatureSelector SequentialFeatureSelector(estimator, k_features=1, forward=True, floating=False, verbose=0, scoring=None, cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Sequential Feature Selection for Classification and Regression. Parameters estimator : scikit-learn classifier or regressor k_features : int or tuple or str (default: 1) Number of features to select, where k_features < the full feature set. New in 0.4.2: A tuple containing a min and max value can be provided, and the SFS will return any feature combination between min and max that scored highest in cross-validation. For example, the tuple (1, 4) will return any combination from 1 up to 4 features instead of a fixed number of features k. New in 0.8.0: A string argument \"best\" or \"parsimonious\". If \"best\" is provided, the feature selector will return the feature subset with the best cross-validation performance. If \"parsimonious\" is provided as an argument, the smallest feature subset that is within one standard error of the cross-validation performance will be selected. forward : bool (default: True) Forward selection if True, backward selection otherwise floating : bool (default: False) Adds a conditional exclusion/inclusion if True. verbose : int (default: 0) Level of verbosity to use in logging. If 0, no output; if 1, the number of features in the current set; if 2, detailed logging including timestamp and cv scores at step. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors.
If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. cv : int (default: 5) Integer or iterable yielding train, test splits. If cv is an integer and the estimator is a classifier (or y consists of integer class labels), stratified k-fold is performed; otherwise, regular k-fold cross-validation is used. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs to avoid delays due to on-demand spawning of the jobs. An int, giving the exact number of total jobs that are spawned. A string, giving an expression as a function of n_jobs, as in 2*n_jobs. clone_estimator : bool (default: True) Clones the estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0 and n_jobs=1. Attributes k_feature_idx_ : array-like, shape = [n_predictions] Feature indices of the selected feature subsets. k_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. New in v 0.13.0. k_score_ : float Cross-validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the sequential selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. The 'feature_names' entry is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/SequentialFeatureSelector/ Methods fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values.
New in v 0.13.0: pandas DataFrames are now also accepted as argument for y. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data then reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: a pandas Series is now also accepted as argument for y. fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns Reduced feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': average of the CV scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Reduced feature subset of X, shape={n_samples, k_features}","title":"SequentialFeatureSelector"},{"location":"api_modules/mlxtend.feature_selection/SequentialFeatureSelector/#sequentialfeatureselector","text":"SequentialFeatureSelector(estimator, k_features=1, forward=True, floating=False, verbose=0, scoring=None, cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Sequential Feature Selection for Classification and Regression. Parameters estimator : scikit-learn classifier or regressor k_features : int or tuple or str (default: 1) Number of features to select, where k_features < the full feature set. New in 0.4.2: A tuple containing a min and max value can be provided, and the SFS will return any feature combination between min and max that scored highest in cross-validation. For example, the tuple (1, 4) will return any combination from 1 up to 4 features instead of a fixed number of features k.
New in 0.8.0: A string argument \"best\" or \"parsimonious\". If \"best\" is provided, the feature selector will return the feature subset with the best cross-validation performance. If \"parsimonious\" is provided as an argument, the smallest feature subset that is within one standard error of the cross-validation performance will be selected. forward : bool (default: True) Forward selection if True, backward selection otherwise floating : bool (default: False) Adds a conditional exclusion/inclusion if True. verbose : int (default: 0) Level of verbosity to use in logging. If 0, no output; if 1, the number of features in the current set; if 2, detailed logging including timestamp and cv scores at step. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. cv : int (default: 5) Integer or iterable yielding train, test splits. If cv is an integer and the estimator is a classifier (or y consists of integer class labels), stratified k-fold is performed; otherwise, regular k-fold cross-validation is used. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs to avoid delays due to on-demand spawning of the jobs. An int, giving the exact number of total jobs that are spawned. A string, giving an expression as a function of n_jobs, as in 2*n_jobs. clone_estimator : bool (default: True) Clones the estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0 and n_jobs=1. Attributes k_feature_idx_ : array-like, shape = [n_predictions] Feature indices of the selected feature subsets. k_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. New in v 0.13.0. k_score_ : float Cross-validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the sequential selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat.
subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. The 'feature_names' entry is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/SequentialFeatureSelector/","title":"SequentialFeatureSelector"},{"location":"api_modules/mlxtend.feature_selection/SequentialFeatureSelector/#methods","text":"fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: pandas DataFrames are now also accepted as argument for y. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data then reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: a pandas Series is now also accepted as argument for y. fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns Reduced feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': average of the CV scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features.
New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Reduced feature subset of X, shape={n_samples, k_features}","title":"Methods"},{"location":"api_modules/mlxtend.file_io/find_filegroups/","text":"find_filegroups find_filegroups(paths, substring='', extensions=None, validity_check=True, ignore_invisible=True, rstrip='', ignore_substring=None) Find and collect files from different directories in a Python dictionary. Parameters paths : list Paths of the directories to be searched. Dictionary keys are built from the first directory. substring : str (default: '') Substring that all files have to contain to be considered. extensions : list (default: None) None or list of allowed file extensions for each path. If provided, the number of extensions must match the number of paths . validity_check : bool (default: True) If True , checks if all dictionary values have the same number of file paths. Prints a warning and returns an empty dictionary if the validity check failed. ignore_invisible : bool (default: True) If True , ignores invisible files (i.e., files starting with a period). rstrip : str (default: '') If provided, strips characters from the right side of the file base names after splitting the extension. Useful to trim different filenames to a common stem. E.g., \"abc_d.txt\" and \"abc_d_.csv\" would share the stem \"abc_d\" if rstrip is set to \"_\". ignore_substring : str (default: None) Ignores files that contain the specified substring. Returns groups : dict Dictionary of file paths. Keys are the file names found in the first directory listed in paths (without file extension). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_filegroups/","title":"Find filegroups"},{"location":"api_modules/mlxtend.file_io/find_filegroups/#find_filegroups","text":"find_filegroups(paths, substring='', extensions=None, validity_check=True, ignore_invisible=True, rstrip='', ignore_substring=None) Find and collect files from different directories in a Python dictionary. Parameters paths : list Paths of the directories to be searched. Dictionary keys are built from the first directory. substring : str (default: '') Substring that all files have to contain to be considered. extensions : list (default: None) None or list of allowed file extensions for each path. If provided, the number of extensions must match the number of paths . validity_check : bool (default: True) If True , checks if all dictionary values have the same number of file paths. Prints a warning and returns an empty dictionary if the validity check failed. ignore_invisible : bool (default: True) If True , ignores invisible files (i.e., files starting with a period). rstrip : str (default: '') If provided, strips characters from the right side of the file base names after splitting the extension. Useful to trim different filenames to a common stem. E.g., \"abc_d.txt\" and \"abc_d_.csv\" would share the stem \"abc_d\" if rstrip is set to \"_\". ignore_substring : str (default: None) Ignores files that contain the specified substring. Returns groups : dict Dictionary of file paths. Keys are the file names found in the first directory listed in paths (without file extension).
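A minimal usage sketch for find_filegroups; the directory layout and file stems below are hypothetical, chosen only to illustrate how the returned dictionary is keyed:

```python
from mlxtend.file_io import find_filegroups

# Hypothetical layout: data/csv/sample1.csv, data/txt/sample1.txt, ...
groups = find_filegroups(paths=['data/csv', 'data/txt'],
                         extensions=['.csv', '.txt'])
# Keys come from the base names in the first directory, e.g.:
# {'sample1': ['data/csv/sample1.csv', 'data/txt/sample1.txt'], ...}
```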
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_filegroups/","title":"find_filegroups"},{"location":"api_modules/mlxtend.file_io/find_files/","text":"find_files find_files(substring, path, recursive=False, check_ext=None, ignore_invisible=True, ignore_substring=None) Find files in a directory based on substring matching. Parameters substring : str Substring of the file to be matched. path : str Path where to look. recursive : bool If True, searches subdirectories recursively. check_ext : str If string (e.g., '.txt'), only returns files that match the specified file extension. ignore_invisible : bool If True , ignores invisible files (i.e., files starting with a period). ignore_substring : str Ignores files that contain the specified substring. Returns results : list List of the matched files. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_files/","title":"Find files"},{"location":"api_modules/mlxtend.file_io/find_files/#find_files","text":"find_files(substring, path, recursive=False, check_ext=None, ignore_invisible=True, ignore_substring=None) Find files in a directory based on substring matching. Parameters substring : str Substring of the file to be matched. path : str Path where to look. recursive : bool If True, searches subdirectories recursively. check_ext : str If string (e.g., '.txt'), only returns files that match the specified file extension. ignore_invisible : bool If True , ignores invisible files (i.e., files starting with a period). ignore_substring : str Ignores files that contain the specified substring. Returns results : list List of the matched files. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_files/","title":"find_files"},{"location":"api_modules/mlxtend.frequent_patterns/apriori/","text":"apriori apriori(df, min_support=0.5, use_colnames=False, max_len=None, n_jobs=1) Get frequent itemsets from a one-hot DataFrame Parameters df : pandas DataFrame or pandas SparseDataFrame pandas DataFrame in the one-hot encoded format. The allowed values are either 0/1 or True/False. For example,

   Apple  Bananas  Beer  Chicken  Milk  Rice
0      1        0     1        1     0     1
1      1        0     1        0     0     1
2      1        0     1        0     0     0
3      1        1     0        0     0     0
4      0        0     1        1     1     1
5      0        0     1        0     1     1
6      0        0     1        0     1     0
7      1        1     0        0     0     0

min_support : float (default: 0.5) A float between 0 and 1 for minimum support of the itemsets returned. The support is computed as the fraction transactions_where_item(s)_occur / total_transactions. use_colnames : bool (default: False) If True, uses the DataFrames' column names in the returned DataFrame instead of column indices. max_len : int (default: None) Maximum length of the itemsets generated. If None (default), all possible itemset lengths (under the apriori condition) are evaluated. Returns pandas DataFrame with columns ['support', 'itemsets'] of all itemsets with support >= min_support and of length <= max_len (if max_len is not None). Each itemset in the 'itemsets' column is of type frozenset , which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset).
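A minimal sketch of typical usage, pairing TransactionEncoder (documented elsewhere in this reference) with apriori on made-up transactions:

```python
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori

# Made-up transactions; apriori expects a one-hot (0/1 or True/False) DataFrame
transactions = [['Apple', 'Beer', 'Rice'],
                ['Apple', 'Beer'],
                ['Milk', 'Beer', 'Rice'],
                ['Apple', 'Bananas']]

te = TransactionEncoder()
df = pd.DataFrame(te.fit(transactions).transform(transactions),
                  columns=te.columns_)

# Frequent itemsets occurring in at least 50% of the transactions
frequent_itemsets = apriori(df, min_support=0.5, use_colnames=True)
print(frequent_itemsets)  # columns: ['support', 'itemsets']
```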
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/apriori/","title":"Apriori"},{"location":"api_modules/mlxtend.frequent_patterns/apriori/#apriori","text":"apriori(df, min_support=0.5, use_colnames=False, max_len=None, n_jobs=1) Get frequent itemsets from a one-hot DataFrame Parameters df : pandas DataFrame or pandas SparseDataFrame pandas DataFrame in the one-hot encoded format. The allowed values are either 0/1 or True/False. For example,

   Apple  Bananas  Beer  Chicken  Milk  Rice
0      1        0     1        1     0     1
1      1        0     1        0     0     1
2      1        0     1        0     0     0
3      1        1     0        0     0     0
4      0        0     1        1     1     1
5      0        0     1        0     1     1
6      0        0     1        0     1     0
7      1        1     0        0     0     0

min_support : float (default: 0.5) A float between 0 and 1 for minimum support of the itemsets returned. The support is computed as the fraction transactions_where_item(s)_occur / total_transactions. use_colnames : bool (default: False) If True, uses the DataFrames' column names in the returned DataFrame instead of column indices. max_len : int (default: None) Maximum length of the itemsets generated. If None (default), all possible itemset lengths (under the apriori condition) are evaluated. Returns pandas DataFrame with columns ['support', 'itemsets'] of all itemsets with support >= min_support and of length <= max_len (if max_len is not None). Each itemset in the 'itemsets' column is of type frozenset , which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/apriori/","title":"apriori"},{"location":"api_modules/mlxtend.frequent_patterns/association_rules/","text":"association_rules association_rules(df, metric='confidence', min_threshold=0.8, support_only=False) Generates a DataFrame of association rules including the metrics 'support', 'confidence', and 'lift' Parameters df : pandas DataFrame pandas DataFrame of frequent itemsets with columns ['support', 'itemsets'] metric : string (default: 'confidence') Metric to evaluate if a rule is of interest. Automatically set to 'support' if support_only=True . Otherwise, supported metrics are 'support', 'confidence', 'lift', 'leverage', and 'conviction'. These metrics are computed as follows: - support(A->C) = support(A+C) [aka 'support'], range: [0, 1] - confidence(A->C) = support(A+C) / support(A), range: [0, 1] - lift(A->C) = confidence(A->C) / support(C), range: [0, inf] - leverage(A->C) = support(A->C) - support(A)*support(C), range: [-1, 1] - conviction = [1 - support(C)] / [1 - confidence(A->C)], range: [0, inf] min_threshold : float (default: 0.8) Minimal threshold for the evaluation metric, via the metric parameter, to decide whether a candidate rule is of interest. support_only : bool (default: False) Only computes the rule support and fills the other metric columns with NaNs. This is useful if: a) the input DataFrame is incomplete, e.g., does not contain support values for all rule antecedents and consequents, or b) you simply want to speed up the computation because you don't need the other metrics. Returns pandas DataFrame with columns \"antecedents\" and \"consequents\" that store itemsets, plus the scoring metric columns: \"antecedent support\", \"consequent support\", \"support\", \"confidence\", \"lift\", \"leverage\", \"conviction\" of all rules for which metric(rule) >= min_threshold.
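A short self-contained sketch chaining apriori into association_rules; the transactions are made up, and the min_threshold value is chosen only so the toy data yields at least one rule:

```python
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules

transactions = [['Beer', 'Rice'], ['Beer'], ['Beer', 'Rice'], ['Rice']]
te = TransactionEncoder()
df = pd.DataFrame(te.fit(transactions).transform(transactions),
                  columns=te.columns_)

frequent_itemsets = apriori(df, min_support=0.5, use_colnames=True)
rules = association_rules(frequent_itemsets, metric='confidence',
                          min_threshold=0.6)
print(rules[['antecedents', 'consequents', 'support', 'confidence', 'lift']])
```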
Each entry in the \"antecedents\" and \"consequents\" columns is of type frozenset , which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/association_rules/","title":"Association rules"},{"location":"api_modules/mlxtend.frequent_patterns/association_rules/#association_rules","text":"association_rules(df, metric='confidence', min_threshold=0.8, support_only=False) Generates a DataFrame of association rules including the metrics 'support', 'confidence', and 'lift' Parameters df : pandas DataFrame pandas DataFrame of frequent itemsets with columns ['support', 'itemsets'] metric : string (default: 'confidence') Metric to evaluate if a rule is of interest. Automatically set to 'support' if support_only=True . Otherwise, supported metrics are 'support', 'confidence', 'lift', 'leverage', and 'conviction'. These metrics are computed as follows: - support(A->C) = support(A+C) [aka 'support'], range: [0, 1] - confidence(A->C) = support(A+C) / support(A), range: [0, 1] - lift(A->C) = confidence(A->C) / support(C), range: [0, inf] - leverage(A->C) = support(A->C) - support(A)*support(C), range: [-1, 1] - conviction = [1 - support(C)] / [1 - confidence(A->C)], range: [0, inf] min_threshold : float (default: 0.8) Minimal threshold for the evaluation metric, via the metric parameter, to decide whether a candidate rule is of interest. support_only : bool (default: False) Only computes the rule support and fills the other metric columns with NaNs. This is useful if: a) the input DataFrame is incomplete, e.g., does not contain support values for all rule antecedents and consequents, or b) you simply want to speed up the computation because you don't need the other metrics. Returns pandas DataFrame with columns \"antecedents\" and \"consequents\" that store itemsets, plus the scoring metric columns: \"antecedent support\", \"consequent support\", \"support\", \"confidence\", \"lift\", \"leverage\", \"conviction\" of all rules for which metric(rule) >= min_threshold. Each entry in the \"antecedents\" and \"consequents\" columns is of type frozenset , which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/association_rules/","title":"association_rules"},{"location":"api_modules/mlxtend.image/extract_face_landmarks/","text":"extract_face_landmarks extract_face_landmarks(img, return_dtype=np.int32) Function to extract face landmarks. Note that this function requires an installation of the Python version of the library \"dlib\": http://dlib.net Parameters img : array, shape = [h, w, ?] numpy array of a face image. Supported shapes are - 3D tensors with 1 or more color channels, for example, RGB: [h, w, 3] - 2D tensors without color channel, for example, Grayscale: [h, w] return_dtype : the return data-type of the array, default: np.int32. Returns landmarks : numpy.ndarray, shape = [68, 2] A numpy array, where each row contains the x-y coordinates of a landmark/point.
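A minimal sketch, assuming dlib is installed and that 'face.png' is a hypothetical image file on disk (the image-loading library, imageio here, is an assumption and not part of mlxtend):

```python
import imageio
from mlxtend.image import extract_face_landmarks

img = imageio.imread('face.png')         # e.g., an RGB array of shape [h, w, 3]
landmarks = extract_face_landmarks(img)  # numpy array of shape [68, 2]
print(landmarks[:5])                     # first five (x, y) landmark coordinates
```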
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/sources/image/extract_face_landmarks.ipynb","title":"Extract face landmarks"},{"location":"api_modules/mlxtend.image/extract_face_landmarks/#extract_face_landmarks","text":"extract_face_landmarks(img, return_dtype=np.int32) Function to extract face landmarks. Note that this function requires an installation of the Python version of the library \"dlib\": http://dlib.net Parameters img : array, shape = [h, w, ?] numpy array of a face image. Supported shapes are - 3D tensors with 1 or more color channels, for example, RGB: [h, w, 3] - 2D tensors without color channel, for example, Grayscale: [h, w] return_dtype : the return data-type of the array, default: np.int32. Returns landmarks : numpy.ndarray, shape = [68, 2] A numpy array, where each row contains the x-y coordinates of a landmark/point. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/sources/image/extract_face_landmarks.ipynb","title":"extract_face_landmarks"},{"location":"api_modules/mlxtend.math/factorial/","text":"factorial factorial(n) None","title":"Factorial"},{"location":"api_modules/mlxtend.math/factorial/#factorial","text":"factorial(n) None","title":"factorial"},{"location":"api_modules/mlxtend.math/num_combinations/","text":"num_combinations num_combinations(n, k, with_replacement=False) Function to calculate the number of possible combinations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool (default: False) Allows repeated elements if True. Returns comb : int Number of possible combinations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_combinations/","title":"Num combinations"},{"location":"api_modules/mlxtend.math/num_combinations/#num_combinations","text":"num_combinations(n, k, with_replacement=False) Function to calculate the number of possible combinations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool (default: False) Allows repeated elements if True. Returns comb : int Number of possible combinations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_combinations/","title":"num_combinations"},{"location":"api_modules/mlxtend.math/num_permutations/","text":"num_permutations num_permutations(n, k, with_replacement=False) Function to calculate the number of possible permutations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool Allows repeated elements if True. Returns permut : int Number of possible permutations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_permutations/","title":"Num permutations"},{"location":"api_modules/mlxtend.math/num_permutations/#num_permutations","text":"num_permutations(n, k, with_replacement=False) Function to calculate the number of possible permutations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool Allows repeated elements if True. Returns permut : int Number of possible permutations.
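A quick numeric check of both counting functions; the expected values follow from the standard counting formulas:

```python
from mlxtend.math import num_combinations, num_permutations

# Choosing k=3 items out of n=4:
print(num_combinations(n=4, k=3))                         # 4  = 4! / (3! * 1!)
print(num_combinations(n=4, k=3, with_replacement=True))  # 20 = (4+3-1)! / (3! * 3!)
print(num_permutations(n=4, k=3))                         # 24 = 4! / 1!
```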
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_permutations/","title":"num_permutations"},{"location":"api_modules/mlxtend.math/vectorspace_dimensionality/","text":"vectorspace_dimensionality vectorspace_dimensionality(ary) Computes the hyper-volume spanned by a vector set Parameters ary : array-like, shape=[num_vectors, num_vectors] An orthogonal set of vectors (arranged as columns in a matrix) Returns dimensions : int An integer indicating the \"dimensionality\" (hyper-volume) spanned by the vector set","title":"Vectorspace dimensionality"},{"location":"api_modules/mlxtend.math/vectorspace_dimensionality/#vectorspace_dimensionality","text":"vectorspace_dimensionality(ary) Computes the hyper-volume spanned by a vector set Parameters ary : array-like, shape=[num_vectors, num_vectors] An orthogonal set of vectors (arranged as columns in a matrix) Returns dimensions : int An integer indicating the \"dimensionality\" (hyper-volume) spanned by the vector set","title":"vectorspace_dimensionality"},{"location":"api_modules/mlxtend.math/vectorspace_orthonormalization/","text":"vectorspace_orthonormalization vectorspace_orthonormalization(ary, eps=1e-13) Transforms a set of column vectors to an orthonormal basis. Given a set of orthogonal vectors, this function converts such column vectors, arranged in a matrix, into orthonormal basis vectors. Parameters ary : array-like, shape=[num_vectors, num_vectors] An orthogonal set of vectors (arranged as columns in a matrix) eps : float (default: 1e-13) A small tolerance value to determine whether the vector norm is zero or not. Returns arr : array-like, shape=[num_vectors, num_vectors] An orthonormal set of vectors (arranged as columns)","title":"Vectorspace orthonormalization"},{"location":"api_modules/mlxtend.math/vectorspace_orthonormalization/#vectorspace_orthonormalization","text":"vectorspace_orthonormalization(ary, eps=1e-13) Transforms a set of column vectors to an orthonormal basis. Given a set of orthogonal vectors, this function converts such column vectors, arranged in a matrix, into orthonormal basis vectors. Parameters ary : array-like, shape=[num_vectors, num_vectors] An orthogonal set of vectors (arranged as columns in a matrix) eps : float (default: 1e-13) A small tolerance value to determine whether the vector norm is zero or not. Returns arr : array-like, shape=[num_vectors, num_vectors] An orthonormal set of vectors (arranged as columns)","title":"vectorspace_orthonormalization"},{"location":"api_modules/mlxtend.plotting/category_scatter/","text":"category_scatter category_scatter(x, y, label_col, data, markers='sxo^v', colors=('blue', 'green', 'red', 'purple', 'gray', 'cyan'), alpha=0.7, markersize=20.0, legend_loc='best') Scatter plot to plot categories in different colors/marker styles. Parameters x : str or int DataFrame column name of the x-axis values or integer for the numpy ndarray column index. y : str or int DataFrame column name of the y-axis values or integer for the numpy ndarray column index. data : Pandas DataFrame object or NumPy ndarray. markers : str Markers that are cycled through the label category. colors : tuple Colors that are cycled through the label category. alpha : float (default: 0.7) Parameter to control the transparency. markersize : float (default: 20.0) Parameter to control the marker size.
legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/category_scatter/","title":"Category scatter"},{"location":"api_modules/mlxtend.plotting/category_scatter/#category_scatter","text":"category_scatter(x, y, label_col, data, markers='sxo^v', colors=('blue', 'green', 'red', 'purple', 'gray', 'cyan'), alpha=0.7, markersize=20.0, legend_loc='best') Scatter plot to plot categories in different colors/marker styles. Parameters x : str or int DataFrame column name of the x-axis values or integer for the numpy ndarray column index. y : str or int DataFrame column name of the y-axis values or integer for the numpy ndarray column index. data : Pandas DataFrame object or NumPy ndarray. markers : str Markers that are cycled through the label category. colors : tuple Colors that are cycled through the label category. alpha : float (default: 0.7) Parameter to control the transparency. markersize : float (default: 20.0) Parameter to control the marker size. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/category_scatter/","title":"category_scatter"},{"location":"api_modules/mlxtend.plotting/checkerboard_plot/","text":"checkerboard_plot checkerboard_plot(ary, cell_colors=('white', 'black'), font_colors=('black', 'white'), fmt='%.1f', figsize=None, row_labels=None, col_labels=None, fontsize=None) Plot a checkerboard table / heatmap via matplotlib. Parameters ary : array-like, shape = [n, m] A 2D NumPy array. cell_colors : tuple or list (default: ('white', 'black')) Tuple or list containing the two colors of the checkerboard pattern. font_colors : tuple or list (default: ('black', 'white')) Font colors corresponding to the cell colors.
figsize : tuple (default: (2.5, 2.5)) Height and width of the figure fmt : str (default: '%.1f') Python string formatter for cell values. The default '%.1f' results in floats with 1 digit after the decimal point. Use '%d' to show numbers as integers. row_labels : list (default: None) List of the row labels. Uses the array row indices 0 to n by default. col_labels : list (default: None) List of the column labels. Uses the array column indices 0 to m by default. fontsize : int (default: None) Specifies the font size of the checkerboard table. Uses matplotlib's default if None. Returns fig : matplotlib Figure object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/checkerboard_plot/","title":"Checkerboard plot"},{"location":"api_modules/mlxtend.plotting/checkerboard_plot/#checkerboard_plot","text":"checkerboard_plot(ary, cell_colors=('white', 'black'), font_colors=('black', 'white'), fmt='%.1f', figsize=None, row_labels=None, col_labels=None, fontsize=None) Plot a checkerboard table / heatmap via matplotlib. Parameters ary : array-like, shape = [n, m] A 2D NumPy array. cell_colors : tuple or list (default: ('white', 'black')) Tuple or list containing the two colors of the checkerboard pattern. font_colors : tuple or list (default: ('black', 'white')) Font colors corresponding to the cell colors. figsize : tuple (default: (2.5, 2.5)) Height and width of the figure fmt : str (default: '%.1f') Python string formatter for cell values. The default '%.1f' results in floats with 1 digit after the decimal point. Use '%d' to show numbers as integers. row_labels : list (default: None) List of the row labels. Uses the array row indices 0 to n by default. col_labels : list (default: None) List of the column labels. Uses the array column indices 0 to m by default. fontsize : int (default: None) Specifies the font size of the checkerboard table. Uses matplotlib's default if None. Returns fig : matplotlib Figure object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/checkerboard_plot/","title":"checkerboard_plot"},{"location":"api_modules/mlxtend.plotting/ecdf/","text":"ecdf ecdf(x, y_label='ECDF', x_label=None, ax=None, percentile=None, ecdf_color=None, ecdf_marker='o', percentile_color='black', percentile_linestyle='--') Plots an Empirical Cumulative Distribution Function Parameters x : array or list, shape=[n_samples,] Array-like object containing the feature values y_label : str (default='ECDF') Text label for the y-axis x_label : str (default=None) Text label for the x-axis ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None percentile : float (default=None) Float between 0 and 1 for plotting a percentile threshold line ecdf_color : matplotlib color (default=None) Color for the ECDF plot; uses matplotlib defaults if None ecdf_marker : matplotlib marker (default='o') Marker style for the ECDF plot percentile_color : matplotlib color (default='black') Color for the percentile threshold if percentile is not None percentile_linestyle : matplotlib linestyle (default='--') Line style for the percentile threshold if percentile is not None Returns ax : matplotlib.axes.Axes object percentile_threshold : float Feature threshold at the percentile or None if percentile=None percentile_count : int or None Number of samples with a feature value less than or equal to the feature threshold at the given percentile, or None if percentile=None Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/ecdf/","title":"Ecdf"},{"location":"api_modules/mlxtend.plotting/ecdf/#ecdf","text":"ecdf(x, y_label='ECDF', x_label=None, ax=None, percentile=None, ecdf_color=None, ecdf_marker='o', percentile_color='black', percentile_linestyle='--') Plots an Empirical Cumulative Distribution Function Parameters x : array or list, shape=[n_samples,] Array-like object containing the feature values y_label : str (default='ECDF') Text label for the y-axis x_label : str (default=None) Text label for the x-axis ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes.
Creates one if ax=None percentile : float (default=None) Float between 0 and 1 for plotting a percentile threshold line ecdf_color : matplotlib color (default=None) Color for the ECDF plot; uses matplotlib defaults if None ecdf_marker : matplotlib marker (default='o') Marker style for the ECDF plot percentile_color : matplotlib color (default='black') Color for the percentile threshold if percentile is not None percentile_linestyle : matplotlib linestyle (default='--') Line style for the percentile threshold if percentile is not None Returns ax : matplotlib.axes.Axes object percentile_threshold : float Feature threshold at the percentile or None if percentile=None percentile_count : int or None Number of samples with a feature value less than or equal to the feature threshold at the given percentile, or None if percentile=None Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/ecdf/","title":"ecdf"},{"location":"api_modules/mlxtend.plotting/enrichment_plot/","text":"enrichment_plot enrichment_plot(df, colors='bgrkcy', markers=' ', linestyles='-', alpha=0.5, lw=2, where='post', grid=True, count_label='Count', xlim='auto', ylim='auto', invert_axes=False, legend_loc='best', ax=None) Plot an enrichment plot Parameters df : pandas.DataFrame A pandas DataFrame where columns represent the different categories. colors : str (default: 'bgrkcy') The plot colors. markers : str (default: ' ') Matplotlib marker styles, e.g., 'sov' for square, circle, and triangle markers. linestyles : str (default: '-') Matplotlib linestyles, e.g., '-,--' to cycle normal and dashed lines. Note that the different linestyles need to be separated by commas. alpha : float (default: 0.5) Transparency level from 0.0 to 1.0.
lw : int or float (default: 2) Linewidth parameter. where : {'post', 'pre', 'mid'} (default: 'post') Starting location of the steps. grid : bool (default: True) Plots a grid if True. count_label : str (default: 'Count') Label for the \"Count\"-axis. xlim : 'auto' or array-like [min, max] (default: 'auto') Minimum and maximum position of the x-axis range. ylim : 'auto' or array-like [min, max] (default: 'auto') Minimum and maximum position of the y-axis range. invert_axes : bool (default: False) Plots count on the x-axis if True. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False ax : matplotlib axis, optional (default: None) Use this axis for plotting or make a new one otherwise Returns ax : matplotlib axis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/enrichment_plot/","title":"enrichment_plot"},{"location":"api_modules/mlxtend.plotting/plot_confusion_matrix/","text":"plot_confusion_matrix plot_confusion_matrix(conf_mat, hide_spines=False, hide_ticks=False, figsize=None, cmap=None, colorbar=False, show_absolute=True, show_normed=False) Plot a confusion matrix via matplotlib. Parameters conf_mat : array-like, shape = [n_classes, n_classes] Confusion matrix from evaluate.confusion_matrix. hide_spines : bool (default: False) Hides axis spines if True. hide_ticks : bool (default: False) Hides axis ticks if True. figsize : tuple (default: (2.5, 2.5)) Height and width of the figure cmap : matplotlib colormap (default: None) Uses matplotlib.pyplot.cm.Blues if None colorbar : bool (default: False) Shows a colorbar if True show_absolute : bool (default: True) Shows absolute confusion matrix coefficients if True. At least one of show_absolute or show_normed must be True. show_normed : bool (default: False) Shows normed confusion matrix coefficients if True. The normed confusion matrix coefficients give the proportion of training examples per class that are assigned the correct label. At least one of show_absolute or show_normed must be True. Returns fig, ax : matplotlib.pyplot subplot objects Figure and axis elements of the subplot. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_confusion_matrix/","title":"Plot confusion matrix"},{"location":"api_modules/mlxtend.plotting/plot_confusion_matrix/#plot_confusion_matrix","text":"plot_confusion_matrix(conf_mat, hide_spines=False, hide_ticks=False, figsize=None, cmap=None, colorbar=False, show_absolute=True, show_normed=False) Plot a confusion matrix via matplotlib. Parameters conf_mat : array-like, shape = [n_classes, n_classes] Confusion matrix from evaluate.confusion_matrix. hide_spines : bool (default: False) Hides axis spines if True. hide_ticks : bool (default: False) Hides axis ticks if True. figsize : tuple (default: (2.5, 2.5)) Height and width of the figure cmap : matplotlib colormap (default: None) Uses matplotlib.pyplot.cm.Blues if None colorbar : bool (default: False) Shows a colorbar if True show_absolute : bool (default: True) Shows absolute confusion matrix coefficients if True. At least one of show_absolute or show_normed must be True. show_normed : bool (default: False) Shows normed confusion matrix coefficients if True. The normed confusion matrix coefficients give the proportion of training examples per class that are assigned the correct label. At least one of show_absolute or show_normed must be True.
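For orientation, a minimal sketch with a made-up 2x2 confusion matrix (the counts are arbitrary):

```python
import numpy as np
import matplotlib.pyplot as plt
from mlxtend.plotting import plot_confusion_matrix

cm = np.array([[10, 2],   # made-up counts: rows = true class, cols = predicted class
               [ 3, 15]])
fig, ax = plot_confusion_matrix(conf_mat=cm,
                                show_absolute=True,
                                show_normed=True)
plt.show()
```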
Returns fig, ax : matplotlib.pyplot subplot objects Figure and axis elements of the subplot. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_confusion_matrix/","title":"plot_confusion_matrix"},{"location":"api_modules/mlxtend.plotting/plot_decision_regions/","text":"plot_decision_regions plot_decision_regions(X, y, clf, feature_index=None, filler_feature_values=None, filler_feature_ranges=None, ax=None, X_highlight=None, res=None, legend=1, hide_spines=True, markers='s^oxv<>', colors='#1f77b4,#ff7f0e,#3ca02c,#d62728,#9467bd,#8c564b,#e377c2,#7f7f7f,#bcbd22,#17becf', scatter_kwargs=None, contourf_kwargs=None, scatter_highlight_kwargs=None) Plot decision regions of a classifier. Please note that this function assumes that class labels are labeled consecutively, e.g., 0, 1, 2, 3, 4, and 5. If you have class labels with integer labels > 4, you may want to provide additional colors and/or markers as colors and markers arguments. See http://matplotlib.org/examples/color/named_colors.html for more information. Parameters X : array-like, shape = [n_samples, n_features] Feature Matrix. y : array-like, shape = [n_samples] True class labels. clf : Classifier object. Must have a .predict method. feature_index : array-like (default: (0,) for 1D, (0, 1) otherwise) Feature indices to use for plotting. The first index in feature_index will be on the x-axis, the second index will be on the y-axis. filler_feature_values : dict (default: None) Only needed when the number of features > 2. Dictionary of feature index-value pairs for the features not being plotted. filler_feature_ranges : dict (default: None) Only needed when the number of features > 2. Dictionary of feature index-value pairs for the features not being plotted. Will use the ranges provided to select training samples for plotting. ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None. X_highlight : array-like, shape = [n_samples, n_features] (default: None) An array with data points that are used to highlight samples in X . res : float or array-like, shape = (2,) (default: None) This parameter was used to define the grid width, but it has been deprecated in favor of determining the number of points given the figure DPI and size automatically for optimal results and computational efficiency. To increase the resolution, it is recommended to provide a dpi argument via matplotlib, e.g., plt.figure(dpi=600) . hide_spines : bool (default: True) Hide axis spines if True. legend : int (default: 1) Integer to specify the legend location. No legend if legend is 0. markers : str (default: 's^oxv<>') Scatterplot markers. colors : str (default: 'red,blue,limegreen,gray,cyan') Comma separated list of colors. scatter_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function. contourf_kwargs : dict (default: None) Keyword arguments for underlying matplotlib contourf function. scatter_highlight_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function.
Returns ax : matplotlib.axes.Axes object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_decision_regions/","title":"Plot decision regions"},{"location":"api_modules/mlxtend.plotting/plot_decision_regions/#plot_decision_regions","text":"plot_decision_regions(X, y, clf, feature_index=None, filler_feature_values=None, filler_feature_ranges=None, ax=None, X_highlight=None, res=None, legend=1, hide_spines=True, markers='s^oxv<>', colors='#1f77b4,#ff7f0e,#3ca02c,#d62728,#9467bd,#8c564b,#e377c2,#7f7f7f,#bcbd22,#17becf', scatter_kwargs=None, contourf_kwargs=None, scatter_highlight_kwargs=None) Plot decision regions of a classifier. Please note that this function assumes that class labels are labeled consecutively, e.g., 0, 1, 2, 3, 4, and 5. If you have class labels with integer labels > 4, you may want to provide additional colors and/or markers as colors and markers arguments. See http://matplotlib.org/examples/color/named_colors.html for more information. Parameters X : array-like, shape = [n_samples, n_features] Feature Matrix. y : array-like, shape = [n_samples] True class labels. clf : Classifier object. Must have a .predict method. feature_index : array-like (default: (0,) for 1D, (0, 1) otherwise) Feature indices to use for plotting. The first index in feature_index will be on the x-axis, the second index will be on the y-axis. filler_feature_values : dict (default: None) Only needed if the number of features is > 2. Dictionary of feature index-value pairs for the features not being plotted. filler_feature_ranges : dict (default: None) Only needed if the number of features is > 2. Dictionary of feature index-value pairs for the features not being plotted. Will use the ranges provided to select training samples for plotting. ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None. X_highlight : array-like, shape = [n_samples, n_features] (default: None) An array with data points that are used to highlight samples in X . res : float or array-like, shape = (2,) (default: None) This parameter was used to define the grid width, but it has been deprecated in favor of determining the number of points given the figure DPI and size automatically for optimal results and computational efficiency. To increase the resolution, it is recommended to provide a dpi argument via matplotlib, e.g., plt.figure(dpi=600). hide_spines : bool (default: True) Hide axis spines if True. legend : int (default: 1) Integer to specify the legend location. No legend if legend is 0. markers : str (default: 's^oxv<>') Scatterplot markers. colors : str (default: 'red,blue,limegreen,gray,cyan') Comma separated list of colors. scatter_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function. contourf_kwargs : dict (default: None) Keyword arguments for underlying matplotlib contourf function. scatter_highlight_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function.
Returns ax : matplotlib.axes.Axes object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_decision_regions/","title":"plot_decision_regions"},{"location":"api_modules/mlxtend.plotting/plot_learning_curves/","text":"plot_learning_curves plot_learning_curves(X_train, y_train, X_test, y_test, clf, train_marker='o', test_marker='^', scoring='misclassification error', suppress_plot=False, print_model=True, style='fivethirtyeight', legend_loc='best') Plots learning curves of a classifier. Parameters X_train : array-like, shape = [n_samples, n_features] Feature matrix of the training dataset. y_train : array-like, shape = [n_samples] True class labels of the training dataset. X_test : array-like, shape = [n_samples, n_features] Feature matrix of the test dataset. y_test : array-like, shape = [n_samples] True class labels of the test dataset. clf : Classifier object. Must have .fit and .predict methods. train_marker : str (default: 'o') Marker for the training set line plot. test_marker : str (default: '^') Marker for the test set line plot. scoring : str (default: 'misclassification error') If not 'misclassification error', accepts the following metrics (from scikit-learn): {'accuracy', 'average_precision', 'f1_micro', 'f1_macro', 'f1_weighted', 'f1_samples', 'log_loss', 'precision', 'recall', 'roc_auc', 'adjusted_rand_score', 'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} suppress_plot : bool (default: False) Suppress matplotlib plots if True. Recommended for testing purposes. print_model : bool (default: True) Print model parameters in plot title if True. style : str (default: 'fivethirtyeight') Matplotlib style. legend_loc : str (default: 'best') Where to place the plot legend: {'best', 'upper left', 'upper right', 'lower left', 'lower right'} Returns errors : (training_error, test_error): tuple of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_learning_curves/","title":"Plot learning curves"},{"location":"api_modules/mlxtend.plotting/plot_learning_curves/#plot_learning_curves","text":"plot_learning_curves(X_train, y_train, X_test, y_test, clf, train_marker='o', test_marker='^', scoring='misclassification error', suppress_plot=False, print_model=True, style='fivethirtyeight', legend_loc='best') Plots learning curves of a classifier. Parameters X_train : array-like, shape = [n_samples, n_features] Feature matrix of the training dataset. y_train : array-like, shape = [n_samples] True class labels of the training dataset. X_test : array-like, shape = [n_samples, n_features] Feature matrix of the test dataset. y_test : array-like, shape = [n_samples] True class labels of the test dataset. clf : Classifier object. Must have .fit and .predict methods. train_marker : str (default: 'o') Marker for the training set line plot. test_marker : str (default: '^') Marker for the test set line plot. scoring : str (default: 'misclassification error') If not 'misclassification error', accepts the following metrics (from scikit-learn): {'accuracy', 'average_precision', 'f1_micro', 'f1_macro', 'f1_weighted', 'f1_samples', 'log_loss', 'precision', 'recall', 'roc_auc', 'adjusted_rand_score', 'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} suppress_plot : bool (default: False) Suppress matplotlib plots if True. Recommended for testing purposes. print_model : bool (default: True) Print model parameters in plot title if True.
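A minimal sketch of the plot_decision_regions call documented above, assuming a two-feature setting so that no filler_feature_values are needed; the classifier choice is arbitrary:

```python
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from mlxtend.data import iris_data
from mlxtend.plotting import plot_decision_regions

X, y = iris_data()
X = X[:, [0, 2]]                      # keep two features so the regions are 2D-plottable
clf = LogisticRegression().fit(X, y)  # any estimator with a .predict method works
ax = plot_decision_regions(X=X, y=y, clf=clf, legend=2)
plt.show()
```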
style : str (default: 'fivethirtyeight') Matplotlib style. legend_loc : str (default: 'best') Where to place the plot legend: {'best', 'upper left', 'upper right', 'lower left', 'lower right'} Returns errors : (training_error, test_error): tuple of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_learning_curves/","title":"plot_learning_curves"},{"location":"api_modules/mlxtend.plotting/plot_linear_regression/","text":"plot_linear_regression plot_linear_regression(X, y, model=LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False), corr_func='pearsonr', scattercolor='blue', fit_style='k--', legend=True, xlim='auto') Plot a linear regression line fit. Parameters X : numpy array, shape = [n_samples,] Samples. y : numpy array, shape (n_samples,) Target values. model: object (default: sklearn.linear_model.LinearRegression) Estimator object for regression. Must implement a .fit() and .predict() method. corr_func: str or function (default: 'pearsonr') Uses pearsonr from scipy.stats if corr_func='pearsonr' to compute the regression slope. If not 'pearsonr', the corr_func parameter expects a function of the form func(x, y), which is expected to return a tuple (correlation_coefficient, p_value). scattercolor: string (default: blue) Color of scatter plot points. fit_style: string (default: k--) Style for the line fit. legend: bool (default: True) Plots a legend with the correlation coefficient, fit coefficient, and intercept values. xlim: array-like (x_min, x_max) or 'auto' (default: 'auto') X-axis limits for the linear fit. Returns regression_fit : tuple intercept, slope, corr_coeff (float, float, float) Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_linear_regression/","title":"Plot linear regression"},{"location":"api_modules/mlxtend.plotting/plot_linear_regression/#plot_linear_regression","text":"plot_linear_regression(X, y, model=LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False), corr_func='pearsonr', scattercolor='blue', fit_style='k--', legend=True, xlim='auto') Plot a linear regression line fit. Parameters X : numpy array, shape = [n_samples,] Samples. y : numpy array, shape (n_samples,) Target values. model: object (default: sklearn.linear_model.LinearRegression) Estimator object for regression. Must implement a .fit() and .predict() method. corr_func: str or function (default: 'pearsonr') Uses pearsonr from scipy.stats if corr_func='pearsonr' to compute the regression slope. If not 'pearsonr', the corr_func parameter expects a function of the form func(x, y), which is expected to return a tuple (correlation_coefficient, p_value). scattercolor: string (default: blue) Color of scatter plot points. fit_style: string (default: k--) Style for the line fit. legend: bool (default: True) Plots a legend with the correlation coefficient, fit coefficient, and intercept values. xlim: array-like (x_min, x_max) or 'auto' (default: 'auto') X-axis limits for the linear fit.
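A minimal usage sketch for plot_learning_curves as documented above; the classifier and train/test split are arbitrary choices:

```python
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from mlxtend.data import iris_data
from mlxtend.plotting import plot_learning_curves

X, y = iris_data()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
# Returns (training_errors, test_errors) and draws the curves unless suppress_plot=True.
train_err, test_err = plot_learning_curves(X_train, y_train, X_test, y_test,
                                           clf=KNeighborsClassifier(n_neighbors=5))
plt.show()
```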
Returns regression_fit : tuple intercept, slope, corr_coeff (float, float, float) Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_linear_regression/","title":"plot_linear_regression"},{"location":"api_modules/mlxtend.plotting/plot_sequential_feature_selection/","text":"plot_sequential_feature_selection plot_sequential_feature_selection(metric_dict, kind='std_dev', color='blue', bcolor='steelblue', marker='o', alpha=0.2, ylabel='Performance', confidence_interval=0.95) Plot feature selection results. Parameters metric_dict : mlxtend.SequentialFeatureSelector.get_metric_dict() object kind : str (default: \"std_dev\") The kind of error bar or confidence interval in {'std_dev', 'std_err', 'ci', None}. color : str (default: \"blue\") Color of the lineplot (accepts any matplotlib color name). bcolor : str (default: \"steelblue\") Color of the error bars / confidence intervals (accepts any matplotlib color name). marker : str (default: \"o\") Marker of the line plot (accepts any matplotlib marker name). alpha : float in [0, 1] (default: 0.2) Transparency of the error bars / confidence intervals. ylabel : str (default: \"Performance\") Y-axis label. confidence_interval : float (default: 0.95) Confidence level if kind='ci' . Returns fig : matplotlib.pyplot.figure() object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_sequential_feature_selection/","title":"Plot sequential feature selection"},{"location":"api_modules/mlxtend.plotting/plot_sequential_feature_selection/#plot_sequential_feature_selection","text":"plot_sequential_feature_selection(metric_dict, kind='std_dev', color='blue', bcolor='steelblue', marker='o', alpha=0.2, ylabel='Performance', confidence_interval=0.95) Plot feature selection results. Parameters metric_dict : mlxtend.SequentialFeatureSelector.get_metric_dict() object kind : str (default: \"std_dev\") The kind of error bar or confidence interval in {'std_dev', 'std_err', 'ci', None}. color : str (default: \"blue\") Color of the lineplot (accepts any matplotlib color name). bcolor : str (default: \"steelblue\") Color of the error bars / confidence intervals (accepts any matplotlib color name). marker : str (default: \"o\") Marker of the line plot (accepts any matplotlib marker name). alpha : float in [0, 1] (default: 0.2) Transparency of the error bars / confidence intervals. ylabel : str (default: \"Performance\") Y-axis label. confidence_interval : float (default: 0.95) Confidence level if kind='ci' . Returns fig : matplotlib.pyplot.figure() object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_sequential_feature_selection/","title":"plot_sequential_feature_selection"},{"location":"api_modules/mlxtend.plotting/remove_borders/","text":"remove_borders remove_borders(axes, left=False, bottom=False, right=True, top=True) Remove chart junk from matplotlib plots. Parameters axes : iterable An iterable containing plt.gca() or plt.subplot() objects, e.g. [plt.gca()]. left : bool (default: False ) Hide left axis spine if True. bottom : bool (default: False ) Hide bottom axis spine if True. right : bool (default: True ) Hide right axis spine if True. top : bool (default: True ) Hide top axis spine if True.
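A minimal sketch for plot_linear_regression on made-up 1D data; the returned tuple holds the intercept, slope, and correlation coefficient:

```python
import numpy as np
import matplotlib.pyplot as plt
from mlxtend.plotting import plot_linear_regression

# Made-up 1D samples and targets.
X = np.array([4, 8, 13, 26, 31, 10, 8, 30, 18, 12, 20, 5, 28, 18, 6])
y = np.array([14, 24, 22, 59, 66, 25, 18, 60, 39, 32, 53, 18, 55, 41, 17])
intercept, slope, corr_coeff = plot_linear_regression(X, y)
plt.show()
```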
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/remove_chartjunk/","title":"Remove borders"},{"location":"api_modules/mlxtend.plotting/remove_borders/#remove_borders","text":"remove_borders(axes, left=False, bottom=False, right=True, top=True) Remove chart junk from matplotlib plots. Parameters axes : iterable An iterable containing plt.gca() or plt.subplot() objects, e.g. [plt.gca()]. left : bool (default: False ) Hide left axis spine if True. bottom : bool (default: False ) Hide bottom axis spine if True. right : bool (default: True ) Hide right axis spine if True. top : bool (default: True ) Hide top axis spine if True. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/remove_chartjunk/","title":"remove_borders"},{"location":"api_modules/mlxtend.plotting/scatterplotmatrix/","text":"scatterplotmatrix scatterplotmatrix(X, fig_axes=None, names=None, figsize=(8, 8), alpha=1.0, **kwargs) Lower triangle of a scatterplot matrix. Parameters X : array-like, shape={num_examples, num_features} Design matrix containing data instances (examples) with multiple exploratory variables (features). fig_axes : tuple (default: None) A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...) names : list (default: None) A list of string names, which should have the same number of elements as there are features (columns) in X . figsize : tuple (default: (8, 8)) Height and width of the subplot grid. Ignored if fig_axes is not None . alpha : float (default: 1.0) Transparency for both the scatter plots and the histograms along the diagonal. **kwargs : kwargs Keyword arguments for the scatterplots. Returns fig_axes : tuple A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...)","title":"Scatterplotmatrix"},{"location":"api_modules/mlxtend.plotting/scatterplotmatrix/#scatterplotmatrix","text":"scatterplotmatrix(X, fig_axes=None, names=None, figsize=(8, 8), alpha=1.0, **kwargs) Lower triangle of a scatterplot matrix. Parameters X : array-like, shape={num_examples, num_features} Design matrix containing data instances (examples) with multiple exploratory variables (features). fig_axes : tuple (default: None) A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...) names : list (default: None) A list of string names, which should have the same number of elements as there are features (columns) in X . figsize : tuple (default: (8, 8)) Height and width of the subplot grid. Ignored if fig_axes is not None . alpha : float (default: 1.0) Transparency for both the scatter plots and the histograms along the diagonal. **kwargs : kwargs Keyword arguments for the scatterplots.
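A minimal sketch for remove_borders; with the defaults only the right and top spines are hidden:

```python
import matplotlib.pyplot as plt
from mlxtend.plotting import remove_borders

plt.plot([1, 2, 3], [1, 4, 9])
# Pass the current axis inside an iterable; defaults hide the right and top spines.
remove_borders(axes=[plt.gca()], left=False, bottom=False, right=True, top=True)
plt.show()
```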
Returns fig_axes : tuple A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...)","title":"scatterplotmatrix"},{"location":"api_modules/mlxtend.plotting/stacked_barplot/","text":"stacked_barplot stacked_barplot(df, bar_width='auto', colors='bgrcky', labels='index', rotation=90, legend_loc='best') Function to plot stacked barplots. Parameters df : pandas.DataFrame A pandas DataFrame where the index denotes the x-axis labels, and the columns contain the different measurements for each row. bar_width: 'auto' or float (default: 'auto') Parameter to set the widths of the bars. If 'auto', the width is automatically determined by the number of columns in the dataset. colors: str (default: 'bgrcky') The colors of the bars. labels: 'index' or iterable (default: 'index') If 'index', the DataFrame index will be used as x-tick labels. rotation: int (default: 90) Parameter to rotate the x-axis labels. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/stacked_barplot/","title":"Stacked barplot"},{"location":"api_modules/mlxtend.plotting/stacked_barplot/#stacked_barplot","text":"stacked_barplot(df, bar_width='auto', colors='bgrcky', labels='index', rotation=90, legend_loc='best') Function to plot stacked barplots. Parameters df : pandas.DataFrame A pandas DataFrame where the index denotes the x-axis labels, and the columns contain the different measurements for each row. bar_width: 'auto' or float (default: 'auto') Parameter to set the widths of the bars. If 'auto', the width is automatically determined by the number of columns in the dataset. colors: str (default: 'bgrcky') The colors of the bars. labels: 'index' or iterable (default: 'index') If 'index', the DataFrame index will be used as x-tick labels. rotation: int (default: 90) Parameter to rotate the x-axis labels. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/stacked_barplot/","title":"stacked_barplot"},{"location":"api_modules/mlxtend.preprocessing/CopyTransformer/","text":"CopyTransformer CopyTransformer() Transformer that returns a copy of the input array. For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/CopyTransformer/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array. get_params(deep=True) Get parameters for this estimator.
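A minimal sketch for the stacked_barplot function documented above; the DataFrame values are made up, and the index supplies the x-tick labels:

```python
import pandas as pd
import matplotlib.pyplot as plt
from mlxtend.plotting import stacked_barplot

df = pd.DataFrame({'X1': [10.0, 12.0, 18.0],
                   'X2': [11.0, 7.0, 14.0]},
                  index=['Sample1', 'Sample2', 'Sample3'])
fig = stacked_barplot(df, rotation=45, legend_loc='best')
plt.show()
```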
Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array.","title":"CopyTransformer"},{"location":"api_modules/mlxtend.preprocessing/CopyTransformer/#copytransformer","text":"CopyTransformer() Transformer that returns a copy of the input array. For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/CopyTransformer/","title":"CopyTransformer"},{"location":"api_modules/mlxtend.preprocessing/CopyTransformer/#methods","text":"fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array.","title":"Methods"},{"location":"api_modules/mlxtend.preprocessing/DenseTransformer/","text":"DenseTransformer DenseTransformer(return_copy=True) Convert a sparse array into a dense array. For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/DenseTransformer/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features.
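A minimal sketch of the CopyTransformer documented above; its typical use is as a copy/no-op step inside a scikit-learn Pipeline:

```python
import numpy as np
from mlxtend.preprocessing import CopyTransformer

X = np.array([[1, 2], [3, 4]])
X_copy = CopyTransformer().fit_transform(X)  # returns a copy of X
assert X_copy is not X and np.array_equal(X_copy, X)
```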
y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array.","title":"DenseTransformer"},{"location":"api_modules/mlxtend.preprocessing/DenseTransformer/#densetransformer","text":"DenseTransformer(return_copy=True) Convert a sparse array into a dense array. For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/DenseTransformer/","title":"DenseTransformer"},{"location":"api_modules/mlxtend.preprocessing/DenseTransformer/#methods","text":"fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array.","title":"Methods"},{"location":"api_modules/mlxtend.preprocessing/MeanCenterer/","text":"MeanCenterer MeanCenterer() Column centering of vectors and matrices. Attributes col_means : numpy.ndarray [n_columns] NumPy array storing the mean values for centering after fitting the MeanCenterer object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/MeanCenterer/ Methods fit(X) Gets the column means for mean centering.
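A minimal sketch of DenseTransformer, e.g. for pipeline steps that cannot handle sparse input:

```python
from scipy.sparse import csr_matrix
from mlxtend.preprocessing import DenseTransformer

X_sparse = csr_matrix([[0, 1], [2, 0]])
X_dense = DenseTransformer().fit_transform(X_sparse)  # plain dense array
print(X_dense)
```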
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns self fit_transform(X) Fits and transforms an array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered. transform(X) Centers a NumPy array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered.","title":"MeanCenterer"},{"location":"api_modules/mlxtend.preprocessing/MeanCenterer/#meancenterer","text":"MeanCenterer() Column centering of vectors and matrices. Attributes col_means : numpy.ndarray [n_columns] NumPy array storing the mean values for centering after fitting the MeanCenterer object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/MeanCenterer/","title":"MeanCenterer"},{"location":"api_modules/mlxtend.preprocessing/MeanCenterer/#methods","text":"fit(X) Gets the column means for mean centering. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns self fit_transform(X) Fits and transforms an array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered. transform(X) Centers a NumPy array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered.","title":"Methods"},{"location":"api_modules/mlxtend.preprocessing/OnehotTransactions/","text":"OnehotTransactions OnehotTransactions(*args, **kwargs) Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/ Methods fit(X) Learn unique column names from transaction DataFrame. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators.
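A minimal sketch of the MeanCenterer fit/transform cycle described above:

```python
import numpy as np
from mlxtend.preprocessing import MeanCenterer

X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
mc = MeanCenterer().fit(X)    # stores the column means in mc.col_means
X_centered = mc.transform(X)  # each column of the copy now has mean 0
```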
Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions. Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return a Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise. The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order.
Exact representation depends on the sparse argument. For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice']","title":"OnehotTransactions"},{"location":"api_modules/mlxtend.preprocessing/OnehotTransactions/#onehottransactions","text":"OnehotTransactions(*args, **kwargs) Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/","title":"OnehotTransactions"},{"location":"api_modules/mlxtend.preprocessing/OnehotTransactions/#methods","text":"fit(X) Learn unique column names from transaction DataFrame. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions. Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array.
Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return a Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise. The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. Exact representation depends on the sparse argument. For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice']","title":"Methods"},{"location":"api_modules/mlxtend.preprocessing/TransactionEncoder/","text":"TransactionEncoder TransactionEncoder() Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/ Methods fit(X) Learn unique column names from transaction DataFrame. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions. Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction.
For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return a Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise. The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. Exact representation depends on the sparse argument. For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice']","title":"TransactionEncoder"},{"location":"api_modules/mlxtend.preprocessing/TransactionEncoder/#transactionencoder","text":"TransactionEncoder() Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/","title":"TransactionEncoder"},{"location":"api_modules/mlxtend.preprocessing/TransactionEncoder/#methods","text":"fit(X) Learn unique column names from transaction DataFrame. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions.
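A minimal sketch of the TransactionEncoder round trip (OnehotTransactions is the older alias of the same class); the transactions are toy data:

```python
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder

transactions = [['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Milk', 'Beer']]
te = TransactionEncoder()
onehot = te.fit_transform(transactions)         # boolean array, one column per unique item
df = pd.DataFrame(onehot, columns=te.columns_)  # convenient input for mlxtend's apriori
back = te.inverse_transform(onehot)             # recovers the item lists
```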
Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return a Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise. The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. Exact representation depends on the sparse argument. For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice']","title":"Methods"},{"location":"api_modules/mlxtend.preprocessing/minmax_scaling/","text":"minmax_scaling minmax_scaling(array, columns, min_val=0, max_val=1) Min max scaling of pandas' DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] min_val : int or float , optional (default= 0 ) minimum value after rescaling. max_val : int or float , optional (default= 1 ) maximum value after rescaling. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with rescaled columns.
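A minimal sketch of minmax_scaling on a toy DataFrame; each selected column is rescaled to the [min_val, max_val] interval:

```python
import pandas as pd
from mlxtend.preprocessing import minmax_scaling

df = pd.DataFrame({'s1': [1, 2, 3, 4, 5],
                   's2': [10, 9, 8, 7, 6]})
df_scaled = minmax_scaling(df, columns=['s1', 's2'])  # both columns now span [0, 1]
```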
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/minmax_scaling/","title":"Minmax scaling"},{"location":"api_modules/mlxtend.preprocessing/minmax_scaling/#minmax_scaling","text":"minmax_scaling(array, columns, min_val=0, max_val=1) Min max scaling of pandas' DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] min_val : int or float , optional (default= 0 ) minimum value after rescaling. max_val : int or float , optional (default= 1 ) maximum value after rescaling. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with rescaled columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/minmax_scaling/","title":"minmax_scaling"},{"location":"api_modules/mlxtend.preprocessing/one_hot/","text":"one_hot one_hot(y, num_labels='auto', dtype='float') One-hot encoding of class labels Parameters y : array-like, shape = [n_classlabels] Python list or numpy array consisting of class labels. num_labels : int or 'auto' Number of unique labels in the class label array. Infers the number of unique labels from the input array if set to 'auto'. dtype : str NumPy array type (float, float32, float64) of the output array. Returns ary : numpy.ndarray, shape = [n_classlabels] One-hot encoded array, where each sample is represented as a row vector in the returned array. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/one_hot/","title":"One hot"},{"location":"api_modules/mlxtend.preprocessing/one_hot/#one_hot","text":"one_hot(y, num_labels='auto', dtype='float') One-hot encoding of class labels Parameters y : array-like, shape = [n_classlabels] Python list or numpy array consisting of class labels. num_labels : int or 'auto' Number of unique labels in the class label array. Infers the number of unique labels from the input array if set to 'auto'. dtype : str NumPy array type (float, float32, float64) of the output array. Returns ary : numpy.ndarray, shape = [n_classlabels] One-hot encoded array, where each sample is represented as a row vector in the returned array. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/one_hot/","title":"one_hot"},{"location":"api_modules/mlxtend.preprocessing/shuffle_arrays_unison/","text":"shuffle_arrays_unison shuffle_arrays_unison(arrays, random_seed=None) Shuffle NumPy arrays in unison. Parameters arrays : array-like, shape = [n_arrays] A list of NumPy arrays. random_seed : int (default: None) Sets the random state. Returns shuffled_arrays : A list of NumPy arrays after shuffling. Examples >>> import numpy as np >>> from mlxtend.preprocessing import shuffle_arrays_unison >>> X1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> y1 = np.array([1, 2, 3]) >>> X2, y2 = shuffle_arrays_unison(arrays=[X1, y1], random_seed=3) >>> assert(X2.all() == np.array([[4, 5, 6], [1, 2, 3], [7, 8, 9]]).all()) >>> assert(y2.all() == np.array([2, 1, 3]).all()) >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/shuffle_arrays_unison/","title":"Shuffle arrays unison"},{"location":"api_modules/mlxtend.preprocessing/shuffle_arrays_unison/#shuffle_arrays_unison","text":"shuffle_arrays_unison(arrays, random_seed=None) Shuffle NumPy arrays in unison. 
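A minimal sketch of one_hot for the class-label encoding documented above:

```python
import numpy as np
from mlxtend.preprocessing import one_hot

y = np.array([0, 1, 2, 1])
print(one_hot(y))
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]
```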
Parameters arrays : array-like, shape = [n_arrays] A list of NumPy arrays. random_seed : int (default: None) Sets the random state. Returns shuffled_arrays : A list of NumPy arrays after shuffling. Examples >>> import numpy as np >>> from mlxtend.preprocessing import shuffle_arrays_unison >>> X1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> y1 = np.array([1, 2, 3]) >>> X2, y2 = shuffle_arrays_unison(arrays=[X1, y1], random_seed=3) >>> assert(X2.all() == np.array([[4, 5, 6], [1, 2, 3], [7, 8, 9]]).all()) >>> assert(y2.all() == np.array([2, 1, 3]).all()) >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/shuffle_arrays_unison/","title":"shuffle_arrays_unison"},{"location":"api_modules/mlxtend.preprocessing/standardize/","text":"standardize standardize(array, columns=None, ddof=0, return_params=False, params=None) Standardize columns in pandas DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] (default: None) Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] If None, standardizes all columns. ddof : int (default: 0) Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. return_params : bool (default: False) If set to True, a dictionary is returned in addition to the standardized array. The parameter dictionary contains the column means ('avgs') and standard deviations ('stds') of the individual columns. params : dict (default: None) A dictionary with column means and standard deviations as returned by the standardize function if return_params was set to True. If a params dictionary is provided, the standardize function will use these instead of computing them from the current array. Notes If all values in a given column are the same, these values are all set to 0.0 .
The standard deviation in the parameters dictionary is consequently set to 1.0 to avoid dividing by zero. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with standardized columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/standardize/","title":"Standardize"},{"location":"api_modules/mlxtend.preprocessing/standardize/#standardize","text":"standardize(array, columns=None, ddof=0, return_params=False, params=None) Standardize columns in pandas DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] (default: None) Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] If None, standardizes all columns. ddof : int (default: 0) Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. return_params : bool (default: False) If set to True, a dictionary is returned in addition to the standardized array. The parameter dictionary contains the column means ('avgs') and standard deviations ('stds') of the individual columns. params : dict (default: None) A dictionary with column means and standard deviations as returned by the standardize function if return_params was set to True. If a params dictionary is provided, the standardize function will use these instead of computing them from the current array. Notes If all values in a given column are the same, these values are all set to 0.0 . The standard deviation in the parameters dictionary is consequently set to 1.0 to avoid dividing by zero. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with standardized columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/standardize/","title":"standardize"},{"location":"api_modules/mlxtend.regressor/LinearRegression/","text":"LinearRegression LinearRegression(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) Ordinary least squares linear regression. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent learning If 1 < minibatches < len(y): Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch; ignored if solver='normal equation' Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/LinearRegression/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. (Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py; Author: Gael Varoquaux gael.varoquaux@normalesup.org; License: BSD 3 clause.) predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.
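A minimal sketch of the return_params/params workflow for the standardize function documented above, using made-up train/test frames so the test split reuses the training parameters:

```python
import pandas as pd
from mlxtend.preprocessing import standardize

df_train = pd.DataFrame({'s1': [1.0, 2.0, 3.0], 's2': [10.0, 20.0, 30.0]})
df_test = pd.DataFrame({'s1': [1.5, 2.5], 's2': [15.0, 25.0]})

# Compute means/stds on the training split, then reuse them on the test split.
df_train_std, params = standardize(df_train, columns=['s1', 's2'], return_params=True)
df_test_std = standardize(df_test, columns=['s1', 's2'], params=params)
```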
Returns self. (Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py; Author: Gael Varoquaux gael.varoquaux@normalesup.org; License: BSD 3 clause.)","title":"LinearRegression"},{"location":"api_modules/mlxtend.regressor/LinearRegression/#linearregression","text":"LinearRegression(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) Ordinary least squares linear regression. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent learning If 1 < minibatches < len(y): Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch; ignored if solver='normal equation' Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/LinearRegression/","title":"LinearRegression"},{"location":"api_modules/mlxtend.regressor/LinearRegression/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. (Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py.)","title":"Methods"},{"location":"api_modules/mlxtend.regressor/LinearRegression/#license-bsd-3-clause","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.
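A minimal sketch of mlxtend's LinearRegression documented above, using the closed-form normal-equations mode (minibatches=None) on toy data:

```python
import numpy as np
from mlxtend.regressor import LinearRegression

X = np.array([[1.0], [2.0], [3.0], [4.0], [5.0]])  # toy feature matrix
y = np.array([1.0, 2.1, 2.9, 4.2, 5.1])
lr = LinearRegression(minibatches=None)  # None -> closed-form normal equations
lr.fit(X, y)
print(lr.w_, lr.b_)    # weights and bias unit after fitting
print(lr.predict(X))
```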
{"location":"api_modules/mlxtend.regressor/StackingCVRegressor/","text":"StackingCVRegressor StackingCVRegressor(regressors, meta_regressor, cv=5, shuffle=True, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A 'Stacking Cross-Validation' regressor for scikit-learn estimators. New in mlxtend v0.7.0 Notes The StackingCVRegressor uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus NumPy's random seed needs to be specified explicitly for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVRegressor. Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingCVRegressor will fit clones of these original regressors that will be stored in the class attribute self.regr_ . meta_regressor : object The meta-regressor to be fitted on the ensemble of regressors. cv : int, cross-validation generator or iterable, optional (default: 5) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - integer, to specify the number of folds in a KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use KFold cross-validation use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. shuffle : bool (default: True) If True, and the cv argument is an integer, the training data will be shuffled at the fitting stage prior to cross-validation. If the cv argument is a specific cross-validation technique, this argument is ignored. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor are stored in the self.train_meta_features_ array, which can be accessed after calling fit . refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Attributes train_meta_features : numpy array, shape = [n_samples, n_regressors] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingCVRegressor/ Methods fit(X, y, groups=None, sample_weight=None) Fit ensemble regressors and the meta-regressor. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises an error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) ** 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) ** 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get an R^2 score of 0.0. Parameters X : array-like, shape = [n_samples, n_features] Test samples. For some estimators this may be a precomputed kernel matrix instead, shape = [n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for the estimator. y : array-like, shape = [n_samples] or [n_samples, n_outputs] True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) w.r.t. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"StackingCVRegressor"},
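A minimal fit/predict sketch for the class above (the toy data and estimator choices are illustrative assumptions, not part of the original docs):

>>> import numpy as np
>>> from sklearn.linear_model import Lasso, Ridge
>>> from sklearn.ensemble import RandomForestRegressor
>>> from mlxtend.regressor import StackingCVRegressor
>>> np.random.seed(42)  # check_cv has no seed of its own, so fix NumPy's RNG (see Notes)
>>> X = np.random.rand(100, 3)
>>> y = 2.0 * X[:, 0] + np.random.rand(100)
>>> stack = StackingCVRegressor(regressors=[Lasso(),
...                                         RandomForestRegressor(n_estimators=10, random_state=1)],
...                             meta_regressor=Ridge(), cv=5)
>>> stack.fit(X, y).predict(X).shape
(100,)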
Returns self","title":"StackingCVRegressor"},{"location":"api_modules/mlxtend.regressor/StackingCVRegressor/#stackingcvregressor","text":"StackingCVRegressor(regressors, meta_regressor, cv=5, shuffle=True, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A 'Stacking Cross-Validation' regressor for scikit-learn estimators. New in mlxtend v0.7.0 Notes The StackingCVRegressor uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus NumPy's random seed need to be specified explicitely for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVRegressor Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingCVRegressor will fit clones of these original regressors that will be stored in the class attribute self.regr_ . meta_regressor : object The meta-regressor to be fitted on the ensemble of regressor cv : int, cross-validation generator or iterable, optional (default: 5) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - integer, to specify the number of folds in a KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use KFold cross-validation use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. shuffle : bool (default: True) If True, and the cv argument is integer, the training data will be shuffled at fitting stage prior to cross-validation. If the cv argument is a specific cross validation technique, this argument is omitted. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor stored in the self.train_meta_features_ array, which can be accessed after calling fit . refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that are supporting the scikit-learn fit/predict API interface but are not compatible to scikit-learn's clone function. Attributes train_meta_features : numpy array, shape = [n_samples, n_regressors] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingCVRegressor/","title":"StackingCVRegressor"},{"location":"api_modules/mlxtend.regressor/StackingCVRegressor/#methods","text":"fit(X, y, groups=None, sample_weight=None) Fit ensemble regressors and the meta-regressor. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. 
This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples. For some estimators this may be a precomputed kernel matrix instead, shape = (n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for the estimator. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) wrt. y. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self","title":"Methods"},{"location":"api_modules/mlxtend.regressor/StackingRegressor/","text":"StackingRegressor StackingRegressor(regressors, meta_regressor, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A Stacking regressor for scikit-learn estimators for regression. Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingRegressor will fit clones of those original regressors that will be stored in the class attribute self.regr_ . 
meta_regressor : object The meta-regressor to be fitted on the ensemble of regressors verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the regressor being fitted - verbose=2 : Prints info about the parameters of the regressor being fitted - verbose>2 : Changes verbose param of the underlying regressor to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor stored in the self.train_meta_features_ array, which can be accessed after calling fit . Attributes regr_ : list, shape=[n_regressors] Fitted regressors (clones of the original regressors) meta_regr_ : estimator Fitted meta-regressor (clone of the original meta-estimator) coef_ : array-like, shape = [n_features] Model coefficients of the fitted meta-estimator intercept_ : float Intercept of the fitted meta-estimator train_meta_features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that are supporting the scikit-learn fit/predict API interface but are not compatible to scikit-learn's clone function. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingRegressor/ Methods fit(X, y, sample_weight=None) Learn weight coefficients from training data for each regressor. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. 
Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples. For some estimators this may be a precomputed kernel matrix instead, shape = (n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for the estimator. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) wrt. y. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self Properties coef_ None intercept_ None","title":"StackingRegressor"},{"location":"api_modules/mlxtend.regressor/StackingRegressor/#stackingregressor","text":"StackingRegressor(regressors, meta_regressor, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A Stacking regressor for scikit-learn estimators for regression. Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingRegressor will fit clones of those original regressors that will be stored in the class attribute self.regr_ . meta_regressor : object The meta-regressor to be fitted on the ensemble of regressors verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the regressor being fitted - verbose=2 : Prints info about the parameters of the regressor being fitted - verbose>2 : Changes verbose param of the underlying regressor to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor stored in the self.train_meta_features_ array, which can be accessed after calling fit . 
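A short fit/predict sketch for the class above (the toy data and base regressors are illustrative assumptions):

>>> import numpy as np
>>> from sklearn.linear_model import Lasso, Ridge
>>> from sklearn.svm import SVR
>>> from mlxtend.regressor import StackingRegressor
>>> X = np.arange(20, dtype=float).reshape(10, 2)
>>> y = X[:, 0] * 0.5 + 1.0
>>> stack = StackingRegressor(regressors=[Lasso(), SVR(kernel='linear')],
...                           meta_regressor=Ridge())
>>> stack.fit(X, y).predict(X).shape
(10,)
>>> stack.coef_.shape  # coefficients of the fitted meta-estimator, one per base regressor
(2,)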
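The R^2 definition in the score methods above can be checked by hand; a small worked example with made-up vectors (NumPy only):

>>> import numpy as np
>>> y_true = np.array([3.0, -0.5, 2.0, 7.0])
>>> y_pred = np.array([2.5, 0.0, 2.0, 8.0])
>>> u = ((y_true - y_pred) ** 2).sum()         # residual sum of squares
>>> v = ((y_true - y_true.mean()) ** 2).sum()  # total sum of squares
>>> round(1 - u / v, 3)
0.949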
{"location":"api_modules/mlxtend.text/generalize_names/","text":"generalize_names generalize_names(name, output_sep=' ', firstname_output_letters=1) Generalize a person's first and last name. Returns a person's name in the format <last_name><separator><first name letter(s)> (all lowercase). Parameters name : str Name of the player output_sep : str (default: ' ') String for separating last name and first name in the output. firstname_output_letters : int Number of letters in the abbreviated first name. Returns gen_name : str The generalized name. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names/","title":"Generalize names"},{"location":"api_modules/mlxtend.text/generalize_names_duplcheck/","text":"generalize_names_duplcheck generalize_names_duplcheck(df, col_name) Generalizes names and removes duplicates. Applies mlxtend.text.generalize_names to a DataFrame with 1 first name letter by default and uses more first name letters if duplicates are detected. Parameters df : pandas.DataFrame DataFrame that contains a column where generalize_names should be applied. col_name : str Name of the DataFrame column to which the generalize_names function should be applied. Returns df_new : pandas.DataFrame New DataFrame object where the generalize_names function has been applied without duplicates. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names_duplcheck/","title":"Generalize names duplcheck"},{"location":"api_modules/mlxtend.text/tokenizer_emoticons/","text":"tokenizer_emoticons tokenizer_emoticons(text) Return emoticons from text. Examples >>> tokenizer_emoticons('This :) is :( a test :-)!') [':)', ':(', ':-)'] For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_emoticons/","title":"Tokenizer emoticons"},{"location":"api_modules/mlxtend.text/tokenizer_words_and_emoticons/","text":"tokenizer_words_and_emoticons tokenizer_words_and_emoticons(text) Convert text to lowercase words and emoticons. Examples >>> tokenizer_words_and_emoticons('This :) is :( a test :-)!') ['this', 'is', 'a', 'test', ':)', ':(', ':-)'] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_words_and_emoticons/","title":"Tokenizer words and emoticons"},{"location":"api_modules/mlxtend.utils/Counter/","text":"Counter Counter(stderr=False, start_newline=True, precision=0, name=None) Class to display the progress of for-loop iterators. Parameters stderr : bool (default: False) Prints output to sys.stderr if True; uses sys.stdout otherwise. start_newline : bool (default: True) Prepends a new line to the counter, which prevents overwriting counters if multiple counters are printed in succession. precision : int (default: 0) Sets the number of decimal places when displaying the time elapsed in seconds. name : string (default: None) Prepends the specified name before the counter to allow distinguishing between multiple counters. Attributes curr_iter : int The current iteration. start_time : float The system's time in seconds when the Counter was initialized. end_time : float The system's time in seconds when the Counter was last updated. Examples >>> import time >>> from mlxtend.utils import Counter >>> cnt = Counter() >>> for i in range(20): ... # do some computation ... time.sleep(0.1) ... cnt.update() 20 iter | 2 sec >>> print('The counter was initialized %d seconds ago.' % (time.time() - cnt.start_time)) The counter was initialized 2 seconds ago. >>> print('The counter was last updated %d seconds ago.' % (time.time() - cnt.end_time)) The counter was last updated 0 seconds ago. For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/utils/Counter/ Methods update() Print current iteration and time elapsed.","title":"Counter"},{"location":"api_modules/mlxtend.utils/assert_raises/","text":"assert_raises assert_raises(exception_type, message, func, *args, **kwargs) Check that an exception is raised with a specific message. Parameters exception_type : exception The exception that should be raised message : str (default: None) The error message that should be raised. Ignored if False or None. func : callable The function that raises the exception *args : positional arguments to func . **kwargs : keyword arguments to func","title":"Assert raises"},{"location":"api_modules/mlxtend.utils/check_Xy/","text":"check_Xy check_Xy(X, y, y_int=True) Validate the feature array X and target array y, e.g., checking that they have consistent lengths and that y contains integer class labels if y_int=True.","title":"check Xy"},{"location":"api_modules/mlxtend.utils/format_kwarg_dictionaries/","text":"format_kwarg_dictionaries format_kwarg_dictionaries(default_kwargs=None, user_kwargs=None, protected_keys=None) Function to combine default and user-specified kwargs dictionaries. Parameters default_kwargs : dict, optional Default kwargs (default is None). user_kwargs : dict, optional User-specified kwargs (default is None). protected_keys : array_like, optional Sequence of keys to be removed from the returned dictionary (default is None). Returns formatted_kwargs : dict Formatted kwargs dictionary.","title":"Format kwarg dictionaries"},
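A small sketch of the combination semantics described above (the dictionaries are made up; it assumes, per the description, that user values override defaults and that protected keys are dropped from the result):

>>> from mlxtend.utils import format_kwarg_dictionaries
>>> defaults = {'alpha': 0.5, 'color': 'blue'}
>>> user = {'color': 'red', 'lw': 2}
>>> combined = format_kwarg_dictionaries(default_kwargs=defaults,
...                                      user_kwargs=user,
...                                      protected_keys=['lw'])
>>> sorted(combined.items())  # 'color' overridden by the user, 'lw' removed
[('alpha', 0.5), ('color', 'red')]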
{"location":"api_subpackages/mlxtend._base/","text":"mlxtend version: 0.14.0dev","title":"Mlxtend. base"},{"location":"api_subpackages/mlxtend.classifier/","text":"mlxtend version: 0.14.0dev Adaline Adaline(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) ADAptive LInear NEuron classifier. Note that this implementation of Adaline expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if solver is not 'normal equation'. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Adaline/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. (Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause) predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self (adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause) EnsembleVoteClassifier EnsembleVoteClassifier(clfs, voting='hard', weights=None, verbose=0, refit=True) Soft Voting/Majority Rule classifier for scikit-learn estimators. Parameters clfs : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the VotingClassifier will fit clones of those original classifiers that will be stored in the class attribute self.clfs_ if refit=True (default). voting : str, {'hard', 'soft'} (default='hard') If 'hard', uses predicted class labels for majority rule voting. Else if 'soft', predicts the class label based on the argmax of the sums of the predicted probabilities, which is recommended for an ensemble of well-calibrated classifiers. weights : array-like, shape = [n_classifiers], optional (default=None) Sequence of weights ( float or int ) to weight the occurrences of predicted class labels (hard voting) or class probabilities before averaging (soft voting). Uses uniform weights if None . verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the clf being fitted - verbose=2 : Prints info about the parameters of the clf being fitted - verbose>2 : Changes verbose param of the underlying clf to self.verbose - 2 refit : bool (default: True) Refits classifiers in clfs if True; uses references to the clfs otherwise (assumes that the classifiers were already fit). Note: refit=False is incompatible with most scikit-learn wrappers! For instance, if any form of cross-validation is performed, this would require re-fitting the classifiers to the training folds, which would raise a NotFittedError if refit=False. (New in mlxtend v0.6.) Attributes classes_ : array-like, shape = [n_predictions] clf : array-like, shape = [n_predictions] The unmodified input classifiers clf_ : array-like, shape = [n_predictions] Fitted clones of the input classifiers Examples >>> import numpy as np >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.ensemble import RandomForestClassifier >>> from mlxtend.classifier import EnsembleVoteClassifier >>> clf1 = LogisticRegression(random_state=1) >>> clf2 = RandomForestClassifier(random_state=1) >>> clf3 = GaussianNB() >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> eclf1 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='hard', verbose=1) >>> eclf1 = eclf1.fit(X, y) >>> print(eclf1.predict(X)) [1 1 1 2 2 2] >>> eclf2 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft') >>> eclf2 = eclf2.fit(X, y) >>> print(eclf2.predict(X)) [1 1 1 2 2 2] >>> eclf3 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='soft', weights=[2,1,1]) >>> eclf3 = eclf3.fit(X, y) >>> print(eclf3.predict(X)) [1 1 1 2 2 2] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/EnsembleVoteClassifier/ Methods fit(X, y, sample_weight=None) Learn weight coefficients from training data for each classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each classifier in the clfs list. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict class labels for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns maj : array-like, shape = [n_samples] Predicted class labels. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns avg : array-like, shape = [n_samples, n_classes] Weighted average probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = [n_samples, n_features] Test samples. y : array-like, shape = [n_samples] or [n_samples, n_outputs] True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) w.r.t. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Return class labels or probabilities for X for each estimator. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns If voting='soft' : array-like = [n_classifiers, n_samples, n_classes] Class probabilities calculated by each classifier. If voting='hard' : array-like = [n_classifiers, n_samples] Class labels predicted by each classifier.
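The soft-voting rule described above (argmax of the weighted average of predicted probabilities) can be illustrated with NumPy alone; the probability values and weights below are made up:

>>> import numpy as np
>>> # probabilities for one sample from three classifiers, over classes [0, 1]
>>> probas = np.array([[0.9, 0.1], [0.4, 0.6], [0.3, 0.7]])
>>> weights = np.array([2, 1, 1])
>>> avg = np.average(probas, axis=0, weights=weights)  # -> [0.625, 0.375]
>>> int(avg.argmax())  # the soft vote picks class 0
0

With uniform weights the same sample would instead average to [0.533..., 0.466...], so the weighting can change the winning class in general.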
LogisticRegression LogisticRegression(eta=0.01, epochs=50, l2_lambda=0.0, minibatches=1, random_seed=None, print_progress=0) Logistic regression classifier. Note that this implementation of Logistic Regression expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2_lambda : float Regularization parameter for L2 regularization. No regularization if l2_lambda=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats with cross_entropy cost (sgd or gd) for every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/LogisticRegression/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. (Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause) predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class 1 probability : float score(X, y) Compute the prediction accuracy. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self (adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause)
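A minimal sketch using the binary {0, 1} label convention noted above (toy data; it assumes only mlxtend and NumPy):

>>> import numpy as np
>>> from mlxtend.classifier import LogisticRegression
>>> X = np.array([[1.0, 2.0], [2.0, 1.0], [3.0, 4.0], [4.0, 3.0]])
>>> y = np.array([0, 0, 1, 1])
>>> lr = LogisticRegression(eta=0.1, epochs=200, minibatches=1, random_seed=1)
>>> lr = lr.fit(X, y)
>>> labels = lr.predict(X)    # class labels in {0, 1}
>>> p1 = lr.predict_proba(X)  # probability of class 1, per the docstring above
>>> len(lr.cost_)             # one cross-entropy value appended per epoch
200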
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilties : array-like, shape= [n_samples, n_classes] score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause Perceptron Perceptron(eta=0.1, epochs=50, random_seed=None, print_progress=0) Perceptron classifier. Note that this implementation of the Perceptron expects binary class labels in {0, 1}. Parameters eta : float (default: 0.1) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Number of passes over the training dataset. Prior to each epoch, the dataset is shuffled to prevent cycles. random_seed : int Random state for initializing random weights and shuffling. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Number of misclassifications in every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Perceptron/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. 
score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause SoftmaxRegression SoftmaxRegression(eta=0.01, epochs=50, l2=0.0, minibatches=1, n_classes=None, random_seed=None, print_progress=0) Softmax regression classifier. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2 : float Regularization parameter for L2 regularization. No regularization if l2=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats, the average cross_entropy for each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/SoftmaxRegression/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. 
predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilities : array-like, shape = [n_samples, n_classes] score(X, y) Compute the prediction accuracy. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause StackingCVClassifier StackingCVClassifier(classifiers, meta_classifier, use_probas=False, cv=2, use_features_in_secondary=False, stratify=True, shuffle=True, verbose=0, store_train_meta_features=False, use_clones=True) A 'Stacking Cross-Validation' classifier for scikit-learn estimators. New in mlxtend v0.4.3 Notes The StackingCVClassifier uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus, NumPy's random seed needs to be specified explicitly for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVClassifier. Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the StackingCVClassifier will fit clones of these original classifiers that will be stored in the class attribute self.clfs_. meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains meta-classifier based on predicted probabilities instead of class labels. cv : int, cross-validation generator or an iterable, optional (default: 2) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 2-fold cross validation, - integer, to specify the number of folds in a (Stratified)KFold, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use either a KFold or StratifiedKFold cross validation depending on the value of the stratify argument. use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. stratify : bool (default: True) If True, and the cv argument is an integer, it will follow a stratified K-Fold cross-validation technique. If the cv argument is a specific cross-validation technique, this argument is ignored. shuffle : bool (default: True) If True, and the cv argument is an integer, the training data will be shuffled at fitting stage prior to cross-validation. If the cv argument is a specific cross-validation technique, this argument is ignored. verbose : int, optional (default=0) Controls the verbosity of the building process.
- verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the classifier being fitted and which fold is currently being used for fitting - verbose=2 : Prints info about the parameters of the classifier being fitted - verbose>2 : Changes verbose param of the underlying classifier to self.verbose - 2 store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier are stored in the self.train_meta_features_ array, which can be accessed after calling fit. use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingCVClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API but are not compatible with scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classifiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingCVClassifier/ Methods fit(X, y, groups=None, sample_weight=None) Fit ensemble classifiers and the meta-classifier. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold(). sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weight to each classifier in the classifiers list as well as to the meta_classifier. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] Predicted class labels. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X.
Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self StackingClassifier StackingClassifier(classifiers, meta_classifier, use_probas=False, average_probas=False, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, use_clones=True) A Stacking classifier for scikit-learn estimators for classification. Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the StackingClassifier will fit clones of these original classifiers that will be stored in the class attribute self.clfs_. meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains meta-classifier based on predicted probabilities instead of class labels. average_probas : bool (default: False) Averages the probabilities as meta features if True. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the classifier being fitted - verbose=2 : Prints info about the parameters of the classifier being fitted - verbose>2 : Changes verbose param of the underlying classifier to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier are stored in the self.train_meta_features_ array, which can be accessed after calling fit. use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API but are not compatible with scikit-learn's clone function.
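Before the attribute reference that follows, a minimal usage sketch ties these parameters together (the base estimators are arbitrary examples; assumes scikit-learn is installed):
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.neighbors import KNeighborsClassifier
>>> from mlxtend.classifier import StackingClassifier
>>> from mlxtend.data import iris_data
>>> X, y = iris_data()
>>> sclf = StackingClassifier(classifiers=[KNeighborsClassifier(), GaussianNB()],
...                           meta_classifier=LogisticRegression(),
...                           use_probas=True)  # meta-classifier is trained on predicted probabilities
>>> sclf = sclf.fit(X, y)
>>> proba = sclf.predict_proba(X)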
Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classifiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingClassifier/ Methods fit(X, y, sample_weight=None) Fit ensemble classifiers and the meta-classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_outputs] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weight to each classifier in the classifiers list as well as to the meta_classifier. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] or [n_samples, n_outputs] Predicted class labels. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] or a list of n_outputs of such arrays if n_outputs > 1. Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.
Returns self","title":"Mlxtend.classifier"},{"location":"api_subpackages/mlxtend.classifier/#adaline","text":"Adaline(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) ADAptive LInear NEuron classifier. Note that this implementation of Adaline expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) solver rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Adaline/","title":"Adaline"},{"location":"api_subpackages/mlxtend.classifier/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_subpackages/mlxtend.classifier/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.classifier/#license-bsd-3-clause","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params( params) Set the parameters of this estimator. 
The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.classifier/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.classifier/#license-bsd-3-clause_1","text":"","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.classifier/#ensemblevoteclassifier","text":"EnsembleVoteClassifier(clfs, voting='hard', weights=None, verbose=0, refit=True) Soft Voting/Majority Rule classifier for scikit-learn estimators. Parameters clfs : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the VotingClassifier will fit clones of those original classifiers that will be stored in the class attribute self.clfs_ if refit=True (default). voting : str, {'hard', 'soft'} (default='hard') If 'hard', uses predicted class labels for majority rule voting. Else if 'soft', predicts the class label based on the argmax of the sums of the predicted probabilities, which is recommended for an ensemble of well-calibrated classifiers. weights : array-like, shape = [n_classifiers], optional (default=None) Sequence of weights (float or int) to weight the occurrences of predicted class labels (hard voting) or class probabilities before averaging (soft voting). Uses uniform weights if None. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the clf being fitted - verbose=2 : Prints info about the parameters of the clf being fitted - verbose>2 : Changes verbose param of the underlying clf to self.verbose - 2 refit : bool (default: True) Refits classifiers in clfs if True; otherwise uses references to the clfs (assumes that the classifiers were already fit). Note: refit=False is incompatible with most scikit-learn wrappers! For instance, if any form of cross-validation is performed, this would require refitting the classifiers on the training folds, which would raise a NotFittedError if refit=False. (New in mlxtend v0.6.) Attributes classes_ : array-like, shape = [n_predictions] clf : array-like, shape = [n_predictions] The unmodified input classifiers clf_ : array-like, shape = [n_predictions] Fitted clones of the input classifiers Examples >>> import numpy as np >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.ensemble import RandomForestClassifier >>> from mlxtend.classifier import EnsembleVoteClassifier >>> clf1 = LogisticRegression(random_state=1) >>> clf2 = RandomForestClassifier(random_state=1) >>> clf3 = GaussianNB() >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> eclf1 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ...
voting='hard', verbose=1) >>> eclf1 = eclf1.fit(X, y) >>> print(eclf1.predict(X)) [1 1 1 2 2 2] >>> eclf2 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft') >>> eclf2 = eclf2.fit(X, y) >>> print(eclf2.predict(X)) [1 1 1 2 2 2] >>> eclf3 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='soft', weights=[2,1,1]) >>> eclf3 = eclf3.fit(X, y) >>> print(eclf3.predict(X)) [1 1 1 2 2 2] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/EnsembleVoteClassifier/","title":"EnsembleVoteClassifier"},{"location":"api_subpackages/mlxtend.classifier/#methods_1","text":"fit(X, y, sample_weight=None) Learn weight coefficients from training data for each classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weight to each classifier in the clfs list. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict class labels for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns maj : array-like, shape = [n_samples] Predicted class labels. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns avg : array-like, shape = [n_samples, n_classes] Weighted average probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Return class labels or probabilities for X for each estimator. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns If voting='soft' : array-like = [n_classifiers, n_samples, n_classes] Class probabilities calculated by each classifier.
If voting='hard' : array-like = [n_classifiers, n_samples] Class labels predicted by each classifier.","title":"Methods"},{"location":"api_subpackages/mlxtend.classifier/#logisticregression","text":"LogisticRegression(eta=0.01, epochs=50, l2_lambda=0.0, minibatches=1, random_seed=None, print_progress=0) Logistic regression classifier. Note that this implementation of Logistic Regression expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2_lambda : float Regularization parameter for L2 regularization. No regularization if l2_lambda=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats with the cross-entropy cost (SGD or GD) for every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/LogisticRegression/","title":"LogisticRegression"},{"location":"api_subpackages/mlxtend.classifier/#methods_2","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_subpackages/mlxtend.classifier/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_2","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.classifier/#license-bsd-3-clause_2","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features.
Returns Class 1 probability : float score(X, y) Compute the prediction accuracy. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.classifier/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_3","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.classifier/#license-bsd-3-clause_3","text":"","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.classifier/#multilayerperceptron","text":"MultiLayerPerceptron(eta=0.5, epochs=50, hidden_layers=[50], n_classes=None, momentum=0.0, l1=0.0, l2=0.0, dropout=1.0, decrease_const=0.0, minibatches=1, random_seed=None, print_progress=0) Multi-layer perceptron classifier with logistic sigmoid activations. Parameters eta : float (default: 0.5) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. hidden_layers : list (default: [50]) Number of units per hidden layer. By default 50 units in the first hidden layer. At the moment, only one hidden layer is supported. n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. l1 : float (default: 0.0) L1 regularization strength l2 : float (default: 0.0) L2 regularization strength momentum : float (default: 0.0) Momentum constant. Factor multiplied with the gradient of the previous epoch t-1 to improve learning speed: w(t) := w(t) - (grad(t) + momentum * grad(t-1)) decrease_const : float (default: 0.0) Decrease constant. Shrinks the learning rate after each epoch via eta / (1 + epoch*decrease_const) minibatches : int (default: 1) Divide the training data into k minibatches for accelerated stochastic gradient descent learning. Gradient Descent Learning if minibatches = 1 Stochastic Gradient Descent learning if minibatches = len(y) Minibatch learning if minibatches > 1 random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape=[n_features, n_classes] Weights after fitting. b_ : 1D-array, shape=[n_classes] Bias units after fitting. cost_ : list List of floats; the mean categorical cross-entropy cost after each epoch.
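To see how these parameters combine in practice, a minimal sketch (hyperparameter values are illustrative only; standardizing the features first is generally advisable for gradient descent):
>>> from mlxtend.data import iris_data
>>> from mlxtend.classifier import MultiLayerPerceptron
>>> X, y = iris_data()
>>> X_std = (X - X.mean(axis=0)) / X.std(axis=0)  # simple feature standardization
>>> mlp = MultiLayerPerceptron(eta=0.5, epochs=50, hidden_layers=[10], minibatches=1, random_seed=1)
>>> mlp = mlp.fit(X_std, y)
>>> y_pred = mlp.predict(X_std)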
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/MultiLayerPerceptron/","title":"MultiLayerPerceptron"},{"location":"api_subpackages/mlxtend.classifier/#methods_3","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_subpackages/mlxtend.classifier/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_4","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.classifier/#license-bsd-3-clause_4","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilities : array-like, shape = [n_samples, n_classes] score(X, y) Compute the prediction accuracy. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.classifier/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_5","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.classifier/#license-bsd-3-clause_5","text":"","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.classifier/#perceptron","text":"Perceptron(eta=0.1, epochs=50, random_seed=None, print_progress=0) Perceptron classifier. Note that this implementation of the Perceptron expects binary class labels in {0, 1}.
Parameters eta : float (default: 0.1) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Number of passes over the training dataset. Prior to each epoch, the dataset is shuffled to prevent cycles. random_seed : int Random state for initializing random weights and shuffling. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Number of misclassifications in every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Perceptron/","title":"Perceptron"},{"location":"api_subpackages/mlxtend.classifier/#methods_4","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_subpackages/mlxtend.classifier/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_6","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.classifier/#license-bsd-3-clause_6","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.
Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.classifier/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_7","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.classifier/#license-bsd-3-clause_7","text":"","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.classifier/#softmaxregression","text":"SoftmaxRegression(eta=0.01, epochs=50, l2=0.0, minibatches=1, n_classes=None, random_seed=None, print_progress=0) Softmax regression classifier. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2 : float Regularization parameter for L2 regularization. No regularization if l2=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats, the average cross-entropy for each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/SoftmaxRegression/","title":"SoftmaxRegression"},{"location":"api_subpackages/mlxtend.classifier/#methods_5","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_subpackages/mlxtend.classifier/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_8","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.classifier/#license-bsd-3-clause_8","text":"predict(X) Predict targets from X.
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilities : array-like, shape = [n_samples, n_classes] score(X, y) Compute the prediction accuracy. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.classifier/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_9","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.classifier/#license-bsd-3-clause_9","text":"","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.classifier/#stackingcvclassifier","text":"StackingCVClassifier(classifiers, meta_classifier, use_probas=False, cv=2, use_features_in_secondary=False, stratify=True, shuffle=True, verbose=0, store_train_meta_features=False, use_clones=True) A 'Stacking Cross-Validation' classifier for scikit-learn estimators. New in mlxtend v0.4.3 Notes The StackingCVClassifier uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus, NumPy's random seed needs to be specified explicitly for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVClassifier. Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the StackingCVClassifier will fit clones of these original classifiers that will be stored in the class attribute self.clfs_. meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains meta-classifier based on predicted probabilities instead of class labels. cv : int, cross-validation generator or an iterable, optional (default: 2) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 2-fold cross validation, - integer, to specify the number of folds in a (Stratified)KFold, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use either a KFold or StratifiedKFold cross validation depending on the value of the stratify argument.
use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. stratify : bool (default: True) If True, and the cv argument is an integer, it will follow a stratified K-Fold cross-validation technique. If the cv argument is a specific cross-validation technique, this argument is ignored. shuffle : bool (default: True) If True, and the cv argument is an integer, the training data will be shuffled at fitting stage prior to cross-validation. If the cv argument is a specific cross-validation technique, this argument is ignored. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the classifier being fitted and which fold is currently being used for fitting - verbose=2 : Prints info about the parameters of the classifier being fitted - verbose>2 : Changes verbose param of the underlying classifier to self.verbose - 2 store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier are stored in the self.train_meta_features_ array, which can be accessed after calling fit. use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingCVClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API but are not compatible with scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classifiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingCVClassifier/","title":"StackingCVClassifier"},{"location":"api_subpackages/mlxtend.classifier/#methods_6","text":"fit(X, y, groups=None, sample_weight=None) Fit ensemble classifiers and the meta-classifier. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold(). sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weight to each classifier in the classifiers list as well as to the meta_classifier. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values.
Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] Predicted class labels. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"Methods"},{"location":"api_subpackages/mlxtend.classifier/#stackingclassifier","text":"StackingClassifier(classifiers, meta_classifier, use_probas=False, average_probas=False, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, use_clones=True) A Stacking classifier for scikit-learn estimators for classification. Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the StackingClassifier will fit clones of these original classifiers that will be stored in the class attribute self.clfs_. meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains meta-classifier based on predicted probabilities instead of class labels. average_probas : bool (default: False) Averages the probabilities as meta features if True. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the classifier being fitted - verbose=2 : Prints info about the parameters of the classifier being fitted - verbose>2 : Changes verbose param of the underlying classifier to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers.
store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier are stored in the self.train_meta_features_ array, which can be accessed after calling fit. use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API but are not compatible with scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classifiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingClassifier/","title":"StackingClassifier"},{"location":"api_subpackages/mlxtend.classifier/#methods_7","text":"fit(X, y, sample_weight=None) Fit ensemble classifiers and the meta-classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_outputs] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weight to each classifier in the classifiers list as well as to the meta_classifier. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] or [n_samples, n_outputs] Predicted class labels. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] or a list of n_outputs of such arrays if n_outputs > 1. Probability for each class per sample.
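Because get_params exposes nested <component>__<parameter> names, a stacked model can be tuned with scikit-learn's grid search; a minimal sketch (the nested key below is illustrative; inspect get_params().keys() for the exact names your mlxtend version exposes):
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from mlxtend.classifier import StackingClassifier
>>> sclf = StackingClassifier(classifiers=[GaussianNB()], meta_classifier=LogisticRegression())
>>> sorted(sclf.get_params().keys())  # list the exact nested parameter names first
>>> # supposing a key such as 'meta_classifier__C' is listed (name illustrative):
>>> grid = GridSearchCV(estimator=sclf, param_grid={'meta_classifier__C': [0.1, 1.0, 10.0]}, cv=5)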
score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"Methods"},{"location":"api_subpackages/mlxtend.cluster/","text":"mlxtend version: 0.14.0dev Kmeans Kmeans(k, max_iter=10, convergence_tolerance=1e-05, random_seed=None, print_progress=0) K-means clustering class. Added in 0.4.1dev Parameters k : int Number of clusters max_iter : int (default: 10) Number of iterations during cluster assignment. Cluster re-assignment stops automatically when the algorithm has converged. convergence_tolerance : float (default: 1e-05) Compares current centroids with centroids of the previous iteration using the given tolerance (a small positive float) to determine if the algorithm converged early. random_seed : int (default: None) Set random state for the initial centroid assignment. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Iterations elapsed 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes centroids_ : 2d-array, shape={k, n_features} Feature values of the k cluster centroids. custers_ : dictionary The cluster assignments stored as a Python dictionary; the dictionary keys denote the cluster indices and the items are Python lists of the sample indices that were assigned to each cluster. iterations_ : int Number of iterations until convergence. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Kmeans/ Methods fit(X, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines).
The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause","title":"Mlxtend.cluster"},{"location":"api_subpackages/mlxtend.cluster/#kmeans","text":"Kmeans(k, max_iter=10, convergence_tolerance=1e-05, random_seed=None, print_progress=0) K-means clustering class. Added in 0.4.1dev Parameters k : int Number of clusters max_iter : int (default: 10) Number of iterations during cluster assignment. Cluster re-assignment stops automatically when the algorithm has converged. convergence_tolerance : float (default: 1e-05) Compares current centroids with centroids of the previous iteration using the given tolerance (a small positive float) to determine if the algorithm converged early. random_seed : int (default: None) Set random state for the initial centroid assignment. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Iterations elapsed 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes centroids_ : 2d-array, shape={k, n_features} Feature values of the k cluster centroids. clusters_ : dictionary The cluster assignments stored as a Python dictionary; the dictionary keys denote the cluster indices and the items are Python lists of the sample indices that were assigned to each cluster. iterations_ : int Number of iterations until convergence. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/cluster/Kmeans/","title":"Kmeans"},{"location":"api_subpackages/mlxtend.cluster/#methods","text":"fit(X, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_subpackages/mlxtend.cluster/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.cluster/#license-bsd-3-clause","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.
Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.cluster/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.cluster/#license-bsd-3-clause_1","text":"","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.data/","text":"mlxtend version: 0.14.0dev autompg_data autompg_data() Auto MPG dataset. Source : https://archive.ics.uci.edu/ml/datasets/Auto+MPG Number of samples : 392 Continuous target variable : mpg Dataset Attributes: 1) cylinders: multi-valued discrete 2) displacement: continuous 3) horsepower: continuous 4) weight: continuous 5) acceleration: continuous 6) model year: multi-valued discrete 7) origin: multi-valued discrete 8) car name: string (unique for each instance) Returns X, y : [n_samples, n_features], [n_targets] X is the feature matrix with 392 auto samples as rows and 8 feature columns (6 rows with NaNs removed). y is a 1-dimensional array of the target MPG values. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/autompg_data/ boston_housing_data boston_housing_data() Boston Housing dataset. Source : https://archive.ics.uci.edu/ml/datasets/Housing Number of samples : 506 Continuous target variable : MEDV MEDV = Median value of owner-occupied homes in $1000's Dataset Attributes: 1) CRIM per capita crime rate by town 2) ZN proportion of residential land zoned for lots over 25,000 sq.ft. 3) INDUS proportion of non-retail business acres per town 4) CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) 5) NOX nitric oxides concentration (parts per 10 million) 6) RM average number of rooms per dwelling 7) AGE proportion of owner-occupied units built prior to 1940 8) DIS weighted distances to five Boston employment centres 9) RAD index of accessibility to radial highways 10) TAX full-value property-tax rate per $10,000 11) PTRATIO pupil-teacher ratio by town 12) B 1000(Bk - 0.63)^2 where Bk is the prop. of b. by town 13) LSTAT % lower status of the population Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 506 housing samples as rows and 13 feature columns. y is a 1-dimensional array of the continuous target variable MEDV Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/boston_housing_data/ iris_data iris_data() Iris flower dataset. Source : https://archive.ics.uci.edu/ml/datasets/Iris Number of samples : 150 Class labels : {0, 1, 2}, distribution: [50, 50, 50] 0 = setosa, 1 = versicolor, 2 = virginica. Dataset Attributes: 1) sepal length [cm] 2) sepal width [cm] 3) petal length [cm] 4) petal width [cm] Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 150 flower samples as rows, and 4 feature columns sepal length, sepal width, petal length, and petal width. y is a 1-dimensional array of the class labels {0, 1, 2} Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/iris_data/ loadlocal_mnist loadlocal_mnist(images_path, labels_path) Read MNIST from ubyte files. 
Parameters images_path : str path to the test or train MNIST ubyte file labels_path : str path to the test or train MNIST class labels file Returns images : [n_samples, n_pixels] numpy.array Pixel values of the images. labels : [n_samples] numpy array Target class labels Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/loadlocal_mnist/ make_multiplexer_dataset make_multiplexer_dataset(address_bits=2, sample_size=100, positive_class_ratio=0.5, shuffle=False, random_seed=None) Function to create a binary n-bit multiplexer dataset. New in mlxtend v0.9 Parameters address_bits : int (default: 2) A positive integer that determines the number of address bits in the multiplexer, which in turn determines the n-bit capacity of the multiplexer and therefore the number of features. For example, 2 address bits will result in a 6-bit multiplexer and consequently 6 features (2 + 2^2 = 6). If address_bits=3 , then this results in an 11-bit multiplexer as (2 + 2^3 = 11) with 11 features. sample_size : int (default: 100) The total number of samples generated. positive_class_ratio : float (default: 0.5) The fraction (a float between 0 and 1) of samples in the dataset of size sample_size that have class label 1. If positive_class_ratio=0.5 (default), then the ratio of class 0 and class 1 samples is perfectly balanced. shuffle : bool (default: False) Whether or not to shuffle the features and labels. If False (default), the samples are returned in sorted order starting with sample_size /2 samples with class label 0 and followed by sample_size /2 samples with class label 1. random_seed : int (default: None) Random seed used for generating the multiplexer samples and shuffling. Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with the number of samples equal to sample_size . The number of features is determined by the number of address bits. For instance, 2 address bits will result in a 6-bit multiplexer and consequently 6 features (2 + 2^2 = 6). All features are binary (values in {0, 1}). y is a 1-dimensional array of class labels in {0, 1}. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/make_multiplexer_dataset mnist_data mnist_data() 5000 samples from the MNIST handwritten digits dataset. Data Source : http://yann.lecun.com/exdb/mnist/ Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 5000 image samples as rows, each row consists of 28x28 pixels that were unrolled into 784 pixel feature vectors. y contains the 10 unique class labels 0-9. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/mnist_data/ three_blobs_data three_blobs_data() A random dataset of 3 2D blobs for clustering. Number of samples : 150 Suggested labels : {0, 1, 2}, distribution: [50, 50, 50] Returns X, y : [n_samples, n_features], [n_cluster_labels] X is the feature matrix with 150 samples as rows and 2 feature columns. y is a 1-dimensional array of the 3 suggested cluster labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/three_blobs_data wine_data wine_data() Wine dataset.
Source : https://archive.ics.uci.edu/ml/datasets/Wine Number of samples : 178 Class labels : {0, 1, 2}, distribution: [59, 71, 48] Dataset Attributes: 1) Alcohol 2) Malic acid 3) Ash 4) Alcalinity of ash 5) Magnesium 6) Total phenols 7) Flavanoids 8) Nonflavanoid phenols 9) Proanthocyanins 10) Color intensity 11) Hue 12) OD280/OD315 of diluted wines 13) Proline Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 178 wine samples as rows and 13 feature columns. y is a 1-dimensional array of the 3 class labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/wine_data","title":"Mlxtend.data"},{"location":"api_subpackages/mlxtend.data/#autompg_data","text":"autompg_data() Auto MPG dataset. Source : https://archive.ics.uci.edu/ml/datasets/Auto+MPG Number of samples : 392 Continuous target variable : mpg Dataset Attributes: 1) cylinders: multi-valued discrete 2) displacement: continuous 3) horsepower: continuous 4) weight: continuous 5) acceleration: continuous 6) model year: multi-valued discrete 7) origin: multi-valued discrete 8) car name: string (unique for each instance) Returns X, y : [n_samples, n_features], [n_targets] X is the feature matrix with 392 auto samples as rows and 8 feature columns (6 rows with NaNs removed). y is a 1-dimensional array of the target MPG values. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/autompg_data/","title":"autompg_data"},{"location":"api_subpackages/mlxtend.data/#boston_housing_data","text":"boston_housing_data() Boston Housing dataset. Source : https://archive.ics.uci.edu/ml/datasets/Housing Number of samples : 506 Continuous target variable : MEDV MEDV = Median value of owner-occupied homes in $1000's Dataset Attributes: 1) CRIM per capita crime rate by town 2) ZN proportion of residential land zoned for lots over 25,000 sq.ft. 3) INDUS proportion of non-retail business acres per town 4) CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) 5) NOX nitric oxides concentration (parts per 10 million) 6) RM average number of rooms per dwelling 7) AGE proportion of owner-occupied units built prior to 1940 8) DIS weighted distances to five Boston employment centres 9) RAD index of accessibility to radial highways 10) TAX full-value property-tax rate per $10,000 11) PTRATIO pupil-teacher ratio by town 12) B 1000(Bk - 0.63)^2 where Bk is the prop. of b. by town 13) LSTAT % lower status of the population Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 506 housing samples as rows and 13 feature columns. y is a 1-dimensional array of the continuous target variable MEDV Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/boston_housing_data/","title":"boston_housing_data"},{"location":"api_subpackages/mlxtend.data/#iris_data","text":"iris_data() Iris flower dataset. Source : https://archive.ics.uci.edu/ml/datasets/Iris Number of samples : 150 Class labels : {0, 1, 2}, distribution: [50, 50, 50] 0 = setosa, 1 = versicolor, 2 = virginica. Dataset Attributes: 1) sepal length [cm] 2) sepal width [cm] 3) petal length [cm] 4) petal width [cm] Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 150 flower samples as rows, and 4 feature columns sepal length, sepal width, petal length, and petal width. 
y is a 1-dimensional array of the class labels {0, 1, 2} Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/iris_data/","title":"iris_data"},{"location":"api_subpackages/mlxtend.data/#loadlocal_mnist","text":"loadlocal_mnist(images_path, labels_path) Read MNIST from ubyte files. Parameters images_path : str path to the test or train MNIST ubyte file labels_path : str path to the test or train MNIST class labels file Returns images : [n_samples, n_pixels] numpy.array Pixel values of the images. labels : [n_samples] numpy array Target class labels Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/loadlocal_mnist/","title":"loadlocal_mnist"},{"location":"api_subpackages/mlxtend.data/#make_multiplexer_dataset","text":"make_multiplexer_dataset(address_bits=2, sample_size=100, positive_class_ratio=0.5, shuffle=False, random_seed=None) Function to create a binary n-bit multiplexer dataset. New in mlxtend v0.9 Parameters address_bits : int (default: 2) A positive integer that determines the number of address bits in the multiplexer, which in turn determines the n-bit capacity of the multiplexer and therefore the number of features. For example, 2 address bits will result in a 6-bit multiplexer and consequently 6 features (2 + 2^2 = 6). If address_bits=3 , then this results in an 11-bit multiplexer as (2 + 2^3 = 11) with 11 features. sample_size : int (default: 100) The total number of samples generated. positive_class_ratio : float (default: 0.5) The fraction (a float between 0 and 1) of samples in the dataset of size sample_size that have class label 1. If positive_class_ratio=0.5 (default), then the ratio of class 0 and class 1 samples is perfectly balanced. shuffle : bool (default: False) Whether or not to shuffle the features and labels. If False (default), the samples are returned in sorted order starting with sample_size /2 samples with class label 0 and followed by sample_size /2 samples with class label 1. random_seed : int (default: None) Random seed used for generating the multiplexer samples and shuffling. Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with the number of samples equal to sample_size . The number of features is determined by the number of address bits. For instance, 2 address bits will result in a 6-bit multiplexer and consequently 6 features (2 + 2^2 = 6). All features are binary (values in {0, 1}). y is a 1-dimensional array of class labels in {0, 1}. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/make_multiplexer_dataset","title":"make_multiplexer_dataset"},{"location":"api_subpackages/mlxtend.data/#mnist_data","text":"mnist_data() 5000 samples from the MNIST handwritten digits dataset. Data Source : http://yann.lecun.com/exdb/mnist/ Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 5000 image samples as rows, each row consists of 28x28 pixels that were unrolled into 784 pixel feature vectors. y contains the 10 unique class labels 0-9. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/mnist_data/","title":"mnist_data"},{"location":"api_subpackages/mlxtend.data/#three_blobs_data","text":"three_blobs_data() A random dataset of 3 2D blobs for clustering.
Number of samples : 150 Suggested labels : {0, 1, 2}, distribution: [50, 50, 50] Returns X, y : [n_samples, n_features], [n_cluster_labels] X is the feature matrix with 150 samples as rows and 2 feature columns. y is a 1-dimensional array of the 3 suggested cluster labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/three_blobs_data","title":"three_blobs_data"},{"location":"api_subpackages/mlxtend.data/#wine_data","text":"wine_data() Wine dataset. Source : https://archive.ics.uci.edu/ml/datasets/Wine Number of samples : 178 Class labels : {0, 1, 2}, distribution: [59, 71, 48] Dataset Attributes: 1) Alcohol 2) Malic acid 3) Ash 4) Alcalinity of ash 5) Magnesium 6) Total phenols 7) Flavanoids 8) Nonflavanoid phenols 9) Proanthocyanins 10) Color intensity 11) Hue 12) OD280/OD315 of diluted wines 13) Proline Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 178 wine samples as rows and 13 feature columns. y is a 1-dimensional array of the 3 class labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/wine_data","title":"wine_data"},{"location":"api_subpackages/mlxtend.evaluate/","text":"mlxtend version: 0.14.0dev BootstrapOutOfBag BootstrapOutOfBag(n_splits=200, random_seed=None) Parameters n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. Returns train_idx : ndarray The training set indices for that split. test_idx : ndarray The testing set indices for that split. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/BootstrapOutOfBag/ Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility with scikit-learn. y : object Always ignored, exists for compatibility with scikit-learn. groups : object Always ignored, exists for compatibility with scikit-learn. Returns n_splits : int Returns the number of splitting iterations in the cross-validator. split(X, y=None, groups=None) y : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn. groups : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn. PredefinedHoldoutSplit PredefinedHoldoutSplit(valid_indices) Train/Validation set splitter for sklearn's GridSearchCV etc. Uses user-specified train/validation set indices to split a dataset into train/validation sets using user-defined or random indices. Parameters valid_indices : array-like, shape (num_examples,) Indices of the training examples in the training set to be used for validation. All other indices in the training set are used for a training subset for model fitting. Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set.
Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split. RandomHoldoutSplit RandomHoldoutSplit(valid_size=0.5, random_seed=None, stratify=False) Train/Validation set splitter for sklearn's GridSearchCV etc. Provides train/validation set indices to split a dataset into train/validation sets using random indices. Parameters valid_size : float (default: 0.5) Proportion of examples that are assigned as validation examples. The remaining 1 - valid_size proportion will then automatically be assigned as training set examples. random_seed : int (default: None) The random seed for splitting the data into training and validation set partitions. stratify : bool (default: False) Whether or not to perform a stratified split Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of training examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split. bootstrap bootstrap(x, func, num_rounds=1000, ci=0.95, ddof=1, seed=None) Implements the ordinary nonparametric bootstrap Parameters x : NumPy array, shape=(n_samples, [n_columns]) A one- or multi-dimensional array of data records func : A function which computes a statistic that is used to compute the bootstrap replicates (the statistic computed from the bootstrap samples). This function must return a scalar value. For example, np.mean or np.median would be an acceptable argument for func if x is a 1-dimensional array or vector. num_rounds : int (default=1000) The number of bootstrap samples to draw where each bootstrap sample has the same number of records as the original dataset. ci : float (default=0.95) A float in the range (0, 1) that represents the confidence level for computing the confidence interval. For example, ci=0.95 (default) will compute the 95% confidence interval from the bootstrap replicates. ddof : int The delta degrees of freedom used when computing the standard error. seed : int or None (default=None) Random seed for generating bootstrap samples. Returns original, standard_error, (lower_ci, upper_ci) : tuple Returns the statistic of the original sample ( original ), the standard error of the estimate, and the respective confidence interval bounds.
Examples >>> import numpy as np >>> from mlxtend.evaluate import bootstrap >>> rng = np.random.RandomState(123) >>> x = rng.normal(loc=5., size=100) >>> original, std_err, ci_bounds = bootstrap(x, ... num_rounds=1000, ... func=np.mean, ... ci=0.95, ... seed=123) >>> print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, ... std_err, ... ci_bounds[0], ... ci_bounds[1])) Mean: 5.03, SE: +/- 0.11, CI95: [4.80, 5.26] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap/ bootstrap_point632_score bootstrap_point632_score(estimator, X, y, n_splits=200, method='.632', scoring_func=None, random_seed=None, clone_estimator=True) Implementation of the .632 [1] and .632+ [2] bootstrap for supervised learning References: [1] Efron, Bradley. 1983. \"Estimating the Error Rate of a Prediction Rule: Improvement on Cross-Validation.\" Journal of the American Statistical Association 78 (382): 316. doi:10.2307/2288636. [2] Efron, Bradley, and Robert Tibshirani. 1997. \"Improvements on Cross-Validation: The .632+ Bootstrap Method.\" Journal of the American Statistical Association 92 (438): 548. doi:10.2307/2965703. Parameters estimator : object An estimator for classification or regression that follows the scikit-learn API and implements \"fit\" and \"predict\" methods. X : array-like The data to fit. Can be, for example, a list or an array of at least 2 dimensions. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. method : str (default='.632') The bootstrap method, which can be either - 1) '.632' bootstrap (default) - 2) '.632+' bootstrap - 3) 'oob' (regular out-of-bag, no weighting) for comparison studies. scoring_func : callable Score function (or loss function) with signature scoring_func(y, y_pred, **kwargs) . If None, uses classification accuracy if the estimator is a classifier and mean squared error if the estimator is a regressor. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. clone_estimator : bool (default=True) Clones the estimator if True, otherwise fits the original. Returns scores : array of float, shape=(len(list(n_splits)),) Array of scores of the estimator for each bootstrap replicate. Examples >>> import numpy as np >>> from sklearn import datasets, linear_model >>> from mlxtend.evaluate import bootstrap_point632_score >>> iris = datasets.load_iris() >>> X = iris.data >>> y = iris.target >>> lr = linear_model.LogisticRegression() >>> scores = bootstrap_point632_score(lr, X, y) >>> acc = np.mean(scores) >>> print('Accuracy:', acc) 0.953023146884 >>> lower = np.percentile(scores, 2.5) >>> upper = np.percentile(scores, 97.5) >>> print('95%% Confidence interval: [%.2f, %.2f]' % (lower, upper)) 95% Confidence interval: [0.90, 0.98] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap_point632_score/ cochrans_q cochrans_q(y_target, *y_model_predictions) Cochran's Q test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array.
Returns q, p : float or None, float Returns the Q (chi-squared) value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/cochrans_q/ combined_ftest_5x2cv combined_ftest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv combined F test proposed by Alpaydin (1999) to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns f : float The F-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/combined_ftest_5x2cv/ confusion_matrix confusion_matrix(y_target, y_predicted, binary=False, positive_label=1) Compute a confusion matrix/contingency table. Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: False) Maps a multi-class problem onto a binary confusion matrix, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns mat : array-like, shape=[n_classes, n_classes] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/confusion_matrix/ feature_importance_permutation feature_importance_permutation(X, y, predict_method, metric, num_rounds=1, seed=None) Feature importance estimation via permutation importance Parameters X : NumPy array, shape = [n_samples, n_features] Dataset, where n_samples is the number of samples and n_features is the number of features. y : NumPy array, shape = [n_samples] Target values. predict_method : prediction function A callable function that predicts the target values from X. metric : str, callable The metric for evaluating the feature importance through permutation. By default, the string 'accuracy' is recommended for classifiers and the string 'r2' is recommended for regressors. Optionally, a custom scoring function (e.g., metric=scoring_func ) can be provided that accepts two arguments, y_true and y_pred, which have similar shape to the y array. num_rounds : int (default=1) Number of rounds the feature columns are permuted to compute the permutation importance. seed : int or None (default=None) Random seed for permuting the feature columns. Returns mean_importance_vals, all_importance_vals : NumPy arrays.
The first array, mean_importance_vals , has shape [n_features, ] and contains the importance values for all features. The shape of the second array is [n_features, num_rounds] and contains the feature importance for each repetition. If num_rounds=1, it contains the same values as the first array, mean_importance_vals. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/feature_importance_permutation/ ftest ftest(y_target, *y_model_predictions) F test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array. Returns f, p : float or None, float Returns the F-value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/ftest/ lift_score lift_score(y_target, y_predicted, binary=True, positive_label=1) Lift measures the degree to which the predictions of a classification model are better than randomly-generated predictions. In terms of True Positives (TP), True Negatives (TN), False Positives (FP), and False Negatives (FN), the lift score is computed as: [ TP / (TP+FP) ] / [ (TP+FN) / (TP+TN+FP+FN) ] Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: True) Maps a multi-class problem onto a binary problem, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns score : float Lift score in the range [0, ∞) Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/lift_score/ mcnemar mcnemar(ary, corrected=True, exact=False) McNemar test for paired nominal data Parameters ary : array-like, shape=[2, 2] 2 x 2 contingency table (as returned by evaluate.mcnemar_table), where a: ary[0, 0]: # of samples that both models predicted correctly b: ary[0, 1]: # of samples that model 1 got right and model 2 got wrong c: ary[1, 0]: # of samples that model 2 got right and model 1 got wrong d: ary[1, 1]: # of samples that both models predicted incorrectly corrected : bool (default: True) Uses Edwards' continuity correction for chi-squared if True exact : bool (default: False) If True , uses an exact binomial test comparing b to a binomial distribution with n = b + c and p = 0.5. It is highly recommended to use exact=True for sample sizes < 25 since the test statistic is otherwise not well approximated by the chi-squared distribution! Returns chi2, p : float or None, float Returns the chi-squared value and the p-value; if exact=True (default: False ), chi2 is None Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/ mcnemar_table mcnemar_table(y_target, y_model1, y_model2) Compute a 2x2 contingency table for McNemar's test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. y_model1 : array-like, shape=[n_samples] Predicted class labels from model 1 as 1D NumPy array. y_model2 : array-like, shape=[n_samples] Predicted class labels from model 2 as 1D NumPy array.
Returns tb : array-like, shape=[2, 2] 2x2 contingency table with the following contents: a: tb[0, 0]: # of samples that both models predicted correctly b: tb[0, 1]: # of samples that model 1 got right and model 2 got wrong c: tb[1, 0]: # of samples that model 2 got right and model 1 got wrong d: tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_table/ mcnemar_tables mcnemar_tables(y_target, *y_model_predictions) Compute multiple 2x2 contingency tables for McNemar's test or Cochran's Q test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of arrays that contain the predicted class labels, one per model. Returns tables : dict Dictionary of NumPy arrays with shape=[2, 2]. Each dictionary key names the two models to be compared based on the order the models were passed as *y_model_predictions . The number of dictionary entries is equal to the number of pairwise combinations between the m models, i.e., \"m choose 2.\" For example, the following target array (containing the true labels) and 3 models y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 1, 1, 1, 0, 1, 0, 0, 0, 0]) would result in the following dictionary: {'model_0 vs model_1': array([[ 4., 1.], [ 2., 3.]]), 'model_0 vs model_2': array([[ 3., 0.], [ 3., 4.]]), 'model_1 vs model_2': array([[ 3., 0.], [ 2., 5.]])} Each array is structured in the following way: tb[0, 0]: # of samples that both models predicted correctly tb[0, 1]: # of samples that model a got right and model b got wrong tb[1, 0]: # of samples that model b got right and model a got wrong tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/ paired_ttest_5x2cv paired_ttest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv paired t test proposed by Dietterich (1998) to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models.
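As a brief hedged sketch of the call signature just described (not from the original docstring; the scikit-learn estimators below are placeholder choices):
>>> from sklearn import datasets
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.tree import DecisionTreeClassifier
>>> from mlxtend.evaluate import paired_ttest_5x2cv
>>> X, y = datasets.load_iris(return_X_y=True)
>>> # 5 repetitions of 2-fold CV; the t-statistic is based on the
>>> # per-fold score differences between the two estimators
>>> t, p = paired_ttest_5x2cv(estimator1=LogisticRegression(),
...                           estimator2=DecisionTreeClassifier(),
...                           X=X, y=y, random_seed=1)
>>> # reject the null hypothesis of equal performance if p < alpha (e.g., 0.05)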
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_5x2cv/ paired_ttest_kfold_cv paired_ttest_kfold_cv(estimator1, estimator2, X, y, cv=10, scoring=None, shuffle=False, random_seed=None) Implements the k-fold paired t test procedure to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. cv : int (default: 10) Number of splits and iterations for the cross-validation procedure scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. shuffle : bool (default: False) Whether to shuffle the dataset for generating the k-fold splits. random_seed : int or None (default: None) Random seed for shuffling the dataset for generating the k-fold splits. Ignored if shuffle=False. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_kfold_cv/ paired_ttest_resampled paired_ttest_resampled(estimator1, estimator2, X, y, num_rounds=30, test_size=0.3, scoring=None, random_seed=None) Implements the resampled paired t test procedure to compare the performance of two models (also called k-hold-out paired t test). Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. num_rounds : int (default: 30) Number of resampling iterations (i.e., train/test splits) test_size : float or int (default: 0.3) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to use as a test set. If int, represents the absolute number of test examples. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits.
Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_resampled/ permutation_test permutation_test(x, y, func='x_mean != y_mean', method='exact', num_rounds=1000, seed=None) Nonparametric permutation test Parameters x : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the first sample (e.g., the treatment group). y : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the second sample (e.g., the control group). func : custom function or str (default: 'x_mean != y_mean') Function to compute the statistic for the permutation test. - If 'x_mean != y_mean', uses func=lambda x, y: np.abs(np.mean(x) - np.mean(y)) for a two-sided test. - If 'x_mean > y_mean', uses func=lambda x, y: np.mean(x) - np.mean(y) for a one-sided test. - If 'x_mean < y_mean', uses func=lambda x, y: np.mean(y) - np.mean(x) for a one-sided test. method : 'approximate' or 'exact' (default: 'exact') If 'exact' (default), all possible permutations are considered. If 'approximate', the number of drawn samples is given by num_rounds . Note that 'exact' is typically not feasible unless the dataset size is relatively small. num_rounds : int (default: 1000) The number of permutation samples if method='approximate' . seed : int or None (default: None) The random seed for generating permutation samples if method='approximate' . Returns The p-value under the null hypothesis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/permutation_test/ proportion_difference proportion_difference(proportion_1, proportion_2, n_1, n_2=None) Computes the test statistic and p-value for a difference of proportions test. Parameters proportion_1 : float The first proportion proportion_2 : float The second proportion n_1 : int The sample size of the first test sample n_2 : int or None (default=None) The sample size of the second test sample. If None , n_2 is set equal to n_1 . Returns z, p : float or None, float Returns the z-score and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/proportion_difference/ scoring scoring(y_target, y_predicted, metric='error', positive_label=1, unique_labels='auto') Compute a scoring metric for supervised learning. Parameters y_target : array-like, shape=[n_values] True class labels or target values. y_predicted : array-like, shape=[n_values] Predicted class labels or target values.
metric : str (default: 'error') Performance metric: 'accuracy': (TP + TN)/(FP + FN + TP + TN) = 1-ERR 'per-class accuracy': Average per-class accuracy 'per-class error': Average per-class error 'error': (FP + FN)/(FP + FN + TP + TN) = 1-ACC 'false_positive_rate': FP/N = FP/(FP + TN) 'true_positive_rate': TP/P = TP/(FN + TP) 'true_negative_rate': TN/N = TN/(FP + TN) 'precision': TP/(TP + FP) 'recall': equal to 'true_positive_rate' 'sensitivity': equal to 'true_positive_rate' or 'recall' 'specificity': equal to 'true_negative_rate' 'f1': 2 * (PRE * REC)/(PRE + REC) 'matthews_corr_coef': (TP*TN - FP*FN) / sqrt((TP + FP)(TP + FN)(TN + FP)(TN + FN)) Where: [TP: True positives, TN: True negatives, FP: False positives, FN: False negatives] positive_label : int (default: 1) Label of the positive class for binary classification metrics. unique_labels : str or array-like (default: 'auto') If 'auto', deduces the unique class labels from y_target Returns score : float Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/scoring/","title":"Mlxtend.evaluate"},{"location":"api_subpackages/mlxtend.evaluate/#bootstrapoutofbag","text":"BootstrapOutOfBag(n_splits=200, random_seed=None) Parameters n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. Returns train_idx : ndarray The training set indices for that split. test_idx : ndarray The testing set indices for that split. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/BootstrapOutOfBag/","title":"BootstrapOutOfBag"},{"location":"api_subpackages/mlxtend.evaluate/#methods","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility with scikit-learn. y : object Always ignored, exists for compatibility with scikit-learn. groups : object Always ignored, exists for compatibility with scikit-learn. Returns n_splits : int Returns the number of splitting iterations in the cross-validator. split(X, y=None, groups=None) y : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn. groups : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn.","title":"Methods"},{"location":"api_subpackages/mlxtend.evaluate/#predefinedholdoutsplit","text":"PredefinedHoldoutSplit(valid_indices) Train/Validation set splitter for sklearn's GridSearchCV etc. Uses user-specified train/validation set indices to split a dataset into train/validation sets using user-defined or random indices. Parameters valid_indices : array-like, shape (num_examples,) Indices of the training examples in the training set to be used for validation. All other indices in the training set are used for a training subset for model fitting.","title":"PredefinedHoldoutSplit"},{"location":"api_subpackages/mlxtend.evaluate/#methods_1","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator.
Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.","title":"Methods"},{"location":"api_subpackages/mlxtend.evaluate/#randomholdoutsplit","text":"RandomHoldoutSplit(valid_size=0.5, random_seed=None, stratify=False) Train/Validation set splitter for sklearn's GridSearchCV etc. Provides train/validation set indices to split a dataset into train/validation sets using random indices. Parameters valid_size : float (default: 0.5) Proportion of examples that are assigned as validation examples. The remaining 1 - valid_size proportion will then automatically be assigned as training set examples. random_seed : int (default: None) The random seed for splitting the data into training and validation set partitions. stratify : bool (default: False) Whether or not to perform a stratified split","title":"RandomHoldoutSplit"},{"location":"api_subpackages/mlxtend.evaluate/#methods_2","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of training examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.","title":"Methods"},{"location":"api_subpackages/mlxtend.evaluate/#bootstrap","text":"bootstrap(x, func, num_rounds=1000, ci=0.95, ddof=1, seed=None) Implements the ordinary nonparametric bootstrap Parameters x : NumPy array, shape=(n_samples, [n_columns]) A one- or multi-dimensional array of data records func : A function which computes a statistic that is used to compute the bootstrap replicates (the statistic computed from the bootstrap samples). This function must return a scalar value. For example, np.mean or np.median would be an acceptable argument for func if x is a 1-dimensional array or vector. num_rounds : int (default=1000) The number of bootstrap samples to draw where each bootstrap sample has the same number of records as the original dataset. ci : float (default=0.95) A float in the range (0, 1) that represents the confidence level for computing the confidence interval. For example, ci=0.95 (default) will compute the 95% confidence interval from the bootstrap replicates. ddof : int The delta degrees of freedom used when computing the standard error.
seed : int or None (default=None) Random seed for generating bootstrap samples. Returns original, standard_error, (lower_ci, upper_ci) : tuple Returns the statistic of the original sample ( original ), the standard error of the estimate, and the respective confidence interval bounds. Examples >>> import numpy as np >>> from mlxtend.evaluate import bootstrap >>> rng = np.random.RandomState(123) >>> x = rng.normal(loc=5., size=100) >>> original, std_err, ci_bounds = bootstrap(x, ... num_rounds=1000, ... func=np.mean, ... ci=0.95, ... seed=123) >>> print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, ... std_err, ... ci_bounds[0], ... ci_bounds[1])) Mean: 5.03, SE: +/- 0.11, CI95: [4.80, 5.26] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap/","title":"bootstrap"},{"location":"api_subpackages/mlxtend.evaluate/#bootstrap_point632_score","text":"bootstrap_point632_score(estimator, X, y, n_splits=200, method='.632', scoring_func=None, random_seed=None, clone_estimator=True) Implementation of the .632 [1] and .632+ [2] bootstrap for supervised learning References: [1] Efron, Bradley. 1983. \"Estimating the Error Rate of a Prediction Rule: Improvement on Cross-Validation.\" Journal of the American Statistical Association 78 (382): 316. doi:10.2307/2288636. [2] Efron, Bradley, and Robert Tibshirani. 1997. \"Improvements on Cross-Validation: The .632+ Bootstrap Method.\" Journal of the American Statistical Association 92 (438): 548. doi:10.2307/2965703. Parameters estimator : object An estimator for classification or regression that follows the scikit-learn API and implements \"fit\" and \"predict\" methods. X : array-like The data to fit. Can be, for example, a list or an array of at least 2 dimensions. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. method : str (default='.632') The bootstrap method, which can be either - 1) '.632' bootstrap (default) - 2) '.632+' bootstrap - 3) 'oob' (regular out-of-bag, no weighting) for comparison studies. scoring_func : callable Score function (or loss function) with signature scoring_func(y, y_pred, **kwargs) . If None, uses classification accuracy if the estimator is a classifier and mean squared error if the estimator is a regressor. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. clone_estimator : bool (default=True) Clones the estimator if True, otherwise fits the original. Returns scores : array of float, shape=(len(list(n_splits)),) Array of scores of the estimator for each bootstrap replicate. Examples >>> import numpy as np >>> from sklearn import datasets, linear_model >>> from mlxtend.evaluate import bootstrap_point632_score >>> iris = datasets.load_iris() >>> X = iris.data >>> y = iris.target >>> lr = linear_model.LogisticRegression() >>> scores = bootstrap_point632_score(lr, X, y) >>> acc = np.mean(scores) >>> print('Accuracy:', acc) 0.953023146884 >>> lower = np.percentile(scores, 2.5) >>> upper = np.percentile(scores, 97.5) >>> print('95%% Confidence interval: [%.2f, %.2f]' % (lower, upper)) 95% Confidence interval: [0.90, 0.98] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap_point632_score/","title":"bootstrap_point632_score"},{"location":"api_subpackages/mlxtend.evaluate/#cochrans_q","text":"cochrans_q(y_target, *y_model_predictions) Cochran's Q test to compare 2 or more models.
Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array. Returns q, p : float or None, float Returns the Q (chi-squared) value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/cochrans_q/","title":"cochrans_q"},{"location":"api_subpackages/mlxtend.evaluate/#combined_ftest_5x2cv","text":"combined_ftest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv combined F test proposed by Alpaydin (1999) to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns f : float The F-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/combined_ftest_5x2cv/","title":"combined_ftest_5x2cv"},{"location":"api_subpackages/mlxtend.evaluate/#confusion_matrix","text":"confusion_matrix(y_target, y_predicted, binary=False, positive_label=1) Compute a confusion matrix/contingency table. Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: False) Maps a multi-class problem onto a binary confusion matrix, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns mat : array-like, shape=[n_classes, n_classes] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/confusion_matrix/","title":"confusion_matrix"},{"location":"api_subpackages/mlxtend.evaluate/#feature_importance_permutation","text":"feature_importance_permutation(X, y, predict_method, metric, num_rounds=1, seed=None) Feature importance estimation via permutation importance Parameters X : NumPy array, shape = [n_samples, n_features] Dataset, where n_samples is the number of samples and n_features is the number of features. y : NumPy array, shape = [n_samples] Target values. predict_method : prediction function A callable function that predicts the target values from X. metric : str, callable The metric for evaluating the feature importance through permutation.
By default, the string 'accuracy' is recommended for classifiers and the string 'r2' for regressors. Optionally, a custom scoring function (e.g., metric=scoring_func ) can be passed that accepts two arguments, y_true and y_pred, which have the same shape as the y array. num_rounds : int (default=1) Number of rounds the feature columns are permuted to compute the permutation importance. seed : int or None (default=None) Random seed for permuting the feature columns. Returns mean_importance_vals, all_importance_vals : NumPy arrays. The first array, mean_importance_vals has shape [n_features, ] and contains the importance values for all features. The shape of the second array is [n_features, num_rounds] and contains the feature importance for each repetition. If num_rounds=1, it contains the same values as the first array, mean_importance_vals. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/feature_importance_permutation/","title":"feature_importance_permutation"},{"location":"api_subpackages/mlxtend.evaluate/#ftest","text":"ftest(y_target, *y_model_predictions) F test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from the models as 1D NumPy arrays. Returns f, p : float or None, float Returns the F-value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/ftest/","title":"ftest"},{"location":"api_subpackages/mlxtend.evaluate/#lift_score","text":"lift_score(y_target, y_predicted, binary=True, positive_label=1) Lift measures the degree to which the predictions of a classification model are better than randomly-generated predictions. In terms of True Positives (TP), True Negatives (TN), False Positives (FP), and False Negatives (FN), the lift score is computed as: [ TP / (TP+FP) ] / [ (TP+FN) / (TP+TN+FP+FN) ] Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: True) Maps a multi-class problem onto a binary problem, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns score : float Lift score in the range [0, ∞] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/lift_score/","title":"lift_score"},{"location":"api_subpackages/mlxtend.evaluate/#mcnemar","text":"mcnemar(ary, corrected=True, exact=False) McNemar test for paired nominal data Parameters ary : array-like, shape=[2, 2] 2 x 2 contingency table (as returned by evaluate.mcnemar_table), where a: ary[0, 0]: # of samples that both models predicted correctly b: ary[0, 1]: # of samples that model 1 got right and model 2 got wrong c: ary[1, 0]: # of samples that model 2 got right and model 1 got wrong d: ary[1, 1]: # of samples that both models predicted incorrectly corrected : bool (default: True) Uses Edward's continuity correction for chi-squared if True exact : bool (default: False) If True , uses an exact binomial test comparing b to a binomial distribution with n = b + c and p = 0.5. It is highly recommended to use exact=True for sample sizes < 25 since the test statistic is otherwise not well-approximated by the chi-squared distribution!
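As a quick illustration (the return values are described next), a minimal sketch of running mcnemar on a hand-constructed 2x2 contingency table; the counts are invented for illustration:

>>> import numpy as np
>>> from mlxtend.evaluate import mcnemar
>>> tb = np.array([[9945, 25],
...                [15, 15]])
>>> chi2, p = mcnemar(ary=tb, corrected=True)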
Returns chi2, p : float or None, float Returns the chi-squared value and the p-value; if exact=True , chi2 is None Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/","title":"mcnemar"},{"location":"api_subpackages/mlxtend.evaluate/#mcnemar_table","text":"mcnemar_table(y_target, y_model1, y_model2) Compute a 2x2 contingency table for McNemar's test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. y_model1 : array-like, shape=[n_samples] Predicted class labels from model 1 as 1D NumPy array. y_model2 : array-like, shape=[n_samples] Predicted class labels from model 2 as 1D NumPy array. Returns tb : array-like, shape=[2, 2] 2x2 contingency table with the following contents: a: tb[0, 0]: # of samples that both models predicted correctly b: tb[0, 1]: # of samples that model 1 got right and model 2 got wrong c: tb[1, 0]: # of samples that model 2 got right and model 1 got wrong d: tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_table/","title":"mcnemar_table"},{"location":"api_subpackages/mlxtend.evaluate/#mcnemar_tables","text":"mcnemar_tables(y_target, *y_model_predictions) Compute multiple 2x2 contingency tables for McNemar's test or Cochran's Q test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from the models as 1D NumPy arrays. Returns tables : dict Dictionary of NumPy arrays with shape=[2, 2]. Each dictionary key names the two models to be compared based on the order the models were passed as *y_model_predictions . The number of dictionary entries is equal to the number of pairwise combinations between the m models, i.e., \"m choose 2.\" For example, the following target array (containing the true labels) and 3 models y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 1, 1, 1, 0, 1, 0, 0, 0, 0]) would result in the following dictionary: {'model_0 vs model_1': array([[ 4., 1.], [ 2., 3.]]), 'model_0 vs model_2': array([[ 3., 0.], [ 3., 4.]]), 'model_1 vs model_2': array([[ 3., 0.], [ 2., 5.]])} Each array is structured in the following way: tb[0, 0]: # of samples that both models predicted correctly tb[0, 1]: # of samples that model a got right and model b got wrong tb[1, 0]: # of samples that model b got right and model a got wrong tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/","title":"mcnemar_tables"},{"location":"api_subpackages/mlxtend.evaluate/#paired_ttest_5x2cv","text":"paired_ttest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv paired t test proposed by Dietterich (1998) to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values.
scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences between the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_5x2cv/","title":"paired_ttest_5x2cv"},{"location":"api_subpackages/mlxtend.evaluate/#paired_ttest_kfold_cv","text":"paired_ttest_kfold_cv(estimator1, estimator2, X, y, cv=10, scoring=None, shuffle=False, random_seed=None) Implements the k-fold paired t test procedure to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. cv : int (default: 10) Number of splits and iterations for the cross-validation procedure scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. shuffle : bool (default: False) Whether to shuffle the dataset for generating the k-fold splits. random_seed : int or None (default: None) Random seed for shuffling the dataset for generating the k-fold splits. Ignored if shuffle=False. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences between the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_kfold_cv/","title":"paired_ttest_kfold_cv"},{"location":"api_subpackages/mlxtend.evaluate/#paired_ttest_resampled","text":"paired_ttest_resampled(estimator1, estimator2, X, y, num_rounds=30, test_size=0.3, scoring=None, random_seed=None) Implements the resampled paired t test procedure to compare the performance of two models (also called k-hold-out paired t test).
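Before the parameter details below, a minimal sketch of comparing two scikit-learn estimators with the resampled paired t test; the estimators and data set are arbitrary choices for illustration:

>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.tree import DecisionTreeClassifier
>>> from mlxtend.evaluate import paired_ttest_resampled
>>> iris = load_iris()
>>> t, p = paired_ttest_resampled(estimator1=LogisticRegression(),
...                               estimator2=DecisionTreeClassifier(),
...                               X=iris.data, y=iris.target,
...                               random_seed=1)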
Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. num_rounds : int (default: 30) Number of resampling iterations (i.e., train/test splits) test_size : float or int (default: 0.3) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to use as a test set. If int, represents the absolute number of test examples. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences between the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_resampled/","title":"paired_ttest_resampled"},{"location":"api_subpackages/mlxtend.evaluate/#permutation_test","text":"permutation_test(x, y, func='x_mean != y_mean', method='exact', num_rounds=1000, seed=None) Nonparametric permutation test Parameters x : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the first sample (e.g., the treatment group). y : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the second sample (e.g., the control group). func : custom function or str (default: 'x_mean != y_mean') Function to compute the statistic for the permutation test. - If 'x_mean != y_mean', uses func=lambda x, y: np.abs(np.mean(x) - np.mean(y)) for a two-sided test. - If 'x_mean > y_mean', uses func=lambda x, y: np.mean(x) - np.mean(y) for a one-sided test. - If 'x_mean < y_mean', uses func=lambda x, y: np.mean(y) - np.mean(x) for a one-sided test. method : 'approximate' or 'exact' (default: 'exact') If 'exact' (default), all possible permutations are considered. If 'approximate' , the number of drawn samples is given by num_rounds . Note that 'exact' is typically not feasible unless the dataset size is relatively small. num_rounds : int (default: 1000) The number of permutation samples if method='approximate' . seed : int or None (default: None) The random seed for generating permutation samples if method='approximate' . Returns p-value under the null hypothesis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/permutation_test/","title":"permutation_test"},{"location":"api_subpackages/mlxtend.evaluate/#proportion_difference","text":"proportion_difference(proportion_1, proportion_2, n_1, n_2=None) Computes the test statistic and p-value for a difference of proportions test.
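A minimal sketch before the parameter details below: comparing two classifier accuracies, say 0.863 and 0.825, each estimated on a test set of 100 examples (the numbers are invented for illustration):

>>> from mlxtend.evaluate import proportion_difference
>>> z, p = proportion_difference(0.863, 0.825, n_1=100)  # z-score and p-value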
Parameters proportion_1 : float The first proportion proportion_2 : float The second proportion n_1 : int The sample size of the first test sample n_2 : int or None (default=None) The sample size of the second test sample. If None , n_1 = n_2 . Returns z, p : float or None, float Returns the z-score and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/proportion_difference/","title":"proportion_difference"},{"location":"api_subpackages/mlxtend.evaluate/#scoring","text":"scoring(y_target, y_predicted, metric='error', positive_label=1, unique_labels='auto') Compute a scoring metric for supervised learning. Parameters y_target : array-like, shape=[n_values] True class labels or target values. y_predicted : array-like, shape=[n_values] Predicted class labels or target values. metric : str (default: 'error') Performance metric: 'accuracy': (TP + TN)/(FP + FN + TP + TN) = 1-ERR 'per-class accuracy': Average per-class accuracy 'per-class error': Average per-class error 'error': (FP + FN)/(FP + FN + TP + TN) = 1-ACC 'false_positive_rate': FP/N = FP/(FP + TN) 'true_positive_rate': TP/P = TP/(FN + TP) 'true_negative_rate': TN/N = TN/(FP + TN) 'precision': TP/(TP + FP) 'recall': equal to 'true_positive_rate' 'sensitivity': equal to 'true_positive_rate' or 'recall' 'specificity': equal to 'true_negative_rate' 'f1': 2 * (PRE * REC)/(PRE + REC) 'matthews_corr_coef': (TP*TN - FP*FN) / sqrt((TP + FP)(TP + FN)(TN + FP)(TN + FN)) Where: [TP: true positives, TN: true negatives, FP: false positives, FN: false negatives] positive_label : int (default: 1) Label of the positive class for binary classification metrics. unique_labels : str or array-like (default: 'auto') If 'auto', deduces the unique class labels from y_target Returns score : float Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/scoring/","title":"scoring"},{"location":"api_subpackages/mlxtend.externals/","text":"mlxtend version: 0.14.0dev","title":"Mlxtend.externals"},{"location":"api_subpackages/mlxtend.feature_extraction/","text":"mlxtend version: 0.14.0dev LinearDiscriminantAnalysis LinearDiscriminantAnalysis(n_discriminants=None) Linear Discriminant Analysis Class Parameters n_discriminants : int (default: None) The number of discriminants for transformation. Keeps the original dimensions of the dataset if None . Attributes w_ : array-like, shape=[n_features, n_discriminants] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/LinearDiscriminantAnalysis/ Methods fit(X, y, n_classes=None) Fit the LDA model with X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.
adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_discriminants] Projected training vectors. PrincipalComponentAnalysis PrincipalComponentAnalysis(n_components=None, solver='eigen') Principal Component Analysis Class Parameters n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . solver : str (default: 'eigen') Method for performing the matrix decomposition. {'eigen', 'svd'} Attributes w_ : array-like, shape=[n_features, n_components] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. loadings_ : array_like, shape=[n_features, n_features] The factor loadings of the original variables onto the principal components. The columns are the principal components, and the rows are the feature loadings. For instance, the first column contains the loadings onto the first principal component. Note that the signs may be flipped depending on whether you use the 'eigen' or 'svd' solver; this does not affect the interpretation of the loadings though. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/PrincipalComponentAnalysis/ Methods fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.
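A minimal sketch of the typical fit/transform workflow with PrincipalComponentAnalysis, using the bundled iris data:

>>> from mlxtend.data import iris_data
>>> from mlxtend.feature_extraction import PrincipalComponentAnalysis
>>> X, y = iris_data()
>>> pca = PrincipalComponentAnalysis(n_components=2)
>>> X_pca = pca.fit(X).transform(X)  # shape: [n_samples, 2]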
RBFKernelPCA RBFKernelPCA(gamma=15.0, n_components=None, copy_X=True) RBF Kernel Principal Component Analysis for dimensionality reduction. Parameters gamma : float (default: 15.0) Free parameter (coefficient) of the RBF kernel. n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . copy_X : bool (default: True) Copies training data, which is required to compute the projection of new data via the transform method. Uses a reference to X if False. Attributes e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. X_projected_ : array-like, shape=[n_samples, n_components] Training samples projected along the component axes. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/RBFKernelPCA/ Methods fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause transform(X) Apply the non-linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"Mlxtend.feature extraction"},{"location":"api_subpackages/mlxtend.feature_extraction/#lineardiscriminantanalysis","text":"LinearDiscriminantAnalysis(n_discriminants=None) Linear Discriminant Analysis Class Parameters n_discriminants : int (default: None) The number of discriminants for transformation. Keeps the original dimensions of the dataset if None . Attributes w_ : array-like, shape=[n_features, n_discriminants] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/LinearDiscriminantAnalysis/","title":"LinearDiscriminantAnalysis"},{"location":"api_subpackages/mlxtend.feature_extraction/#methods","text":"fit(X, y, n_classes=None) Fit the LDA model with X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values.
n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_subpackages/mlxtend.feature_extraction/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.feature_extraction/#license-bsd-3-clause","text":"set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.feature_extraction/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.feature_extraction/#license-bsd-3-clause_1","text":"transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_discriminants] Projected training vectors.","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.feature_extraction/#principalcomponentanalysis","text":"PrincipalComponentAnalysis(n_components=None, solver='eigen') Principal Component Analysis Class Parameters n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . solver : str (default: 'eigen') Method for performing the matrix decomposition. {'eigen', 'svd'} Attributes w_ : array-like, shape=[n_features, n_components] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. loadings_ : array_like, shape=[n_features, n_features] The factor loadings of the original variables onto the principal components. The columns are the principal components, and the rows are the feature loadings. For instance, the first column contains the loadings onto the first principal component. Note that the signs may be flipped depending on whether you use the 'eigen' or 'svd' solver; this does not affect the interpretation of the loadings though.
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/PrincipalComponentAnalysis/","title":"PrincipalComponentAnalysis"},{"location":"api_subpackages/mlxtend.feature_extraction/#methods_1","text":"fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_subpackages/mlxtend.feature_extraction/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_2","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.feature_extraction/#license-bsd-3-clause_2","text":"set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.feature_extraction/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_3","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.feature_extraction/#license-bsd-3-clause_3","text":"transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.feature_extraction/#rbfkernelpca","text":"RBFKernelPCA(gamma=15.0, n_components=None, copy_X=True) RBF Kernel Principal Component Analysis for dimensionality reduction. Parameters gamma : float (default: 15.0) Free parameter (coefficient) of the RBF kernel. n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . copy_X : bool (default: True) Copies training data, which is required to compute the projection of new data via the transform method. Uses a reference to X if False. Attributes e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. X_projected_ : array-like, shape=[n_samples, n_components] Training samples projected along the component axes.
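A minimal sketch of RBFKernelPCA on a nonlinearly separable toy dataset; the make_moons data and the gamma value are arbitrary choices for illustration:

>>> from sklearn.datasets import make_moons
>>> from mlxtend.feature_extraction import RBFKernelPCA
>>> X, y = make_moons(n_samples=100, random_state=123)
>>> kpca = RBFKernelPCA(gamma=15.0, n_components=2)
>>> X_kpca = kpca.fit(X).transform(X)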
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/RBFKernelPCA/","title":"RBFKernelPCA"},{"location":"api_subpackages/mlxtend.feature_extraction/#methods_2","text":"fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_subpackages/mlxtend.feature_extraction/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_4","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.feature_extraction/#license-bsd-3-clause_4","text":"set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.feature_extraction/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_5","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.feature_extraction/#license-bsd-3-clause_5","text":"transform(X) Apply the non-linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.feature_selection/","text":"mlxtend version: 0.14.0dev ColumnSelector ColumnSelector(cols=None, drop_axis=False) Object for selecting specific columns from a data set. Parameters cols : array-like (default: None) A list specifying the feature indices to be selected. For example, [1, 4, 5] to select the 2nd, 5th, and 6th feature columns. If None, returns all columns in the array. drop_axis : bool (default=False) Drops last axis if True and only one column is selected. This is useful, e.g., when the ColumnSelector is used for selecting only one column and the resulting array should be fed to, e.g., a scikit-learn column selector. E.g., instead of returning an array with shape (n_samples, 1), drop_axis=True will return an array with shape (n_samples,). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ColumnSelector/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features.
y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features ExhaustiveFeatureSelector ExhaustiveFeatureSelector(estimator, min_features=1, max_features=1, print_progress=True, scoring='accuracy', cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Exhaustive Feature Selection for Classification and Regression. (new in v0.4.3) Parameters estimator : scikit-learn classifier or regressor min_features : int (default: 1) Minimum number of features to select max_features : int (default: 1) Maximum number of features to select print_progress : bool (default: True) Prints progress as the number of epochs to stderr. scoring : str (default='accuracy') Scoring metric in {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} for regressors, or a callable object or function with signature scorer(estimator, X, y) . cv : int (default: 5) Scikit-learn cross-validation generator or int . If estimator is a classifier (or y consists of integer class labels), stratified k-fold is performed, and regular k-fold cross-validation otherwise. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1.
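A minimal sketch (the fitted attributes are described next): exhaustively scoring all 1- to 4-feature subsets of the iris features; the estimator and subset bounds are arbitrary choices for illustration:

>>> from sklearn.neighbors import KNeighborsClassifier
>>> from mlxtend.data import iris_data
>>> from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS
>>> X, y = iris_data()
>>> efs = EFS(KNeighborsClassifier(n_neighbors=3),
...           min_features=1, max_features=4,
...           scoring='accuracy', cv=5, print_progress=False)
>>> efs = efs.fit(X, y)
>>> best = (efs.best_idx_, efs.best_score_)  # selected indices and their CV score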
Attributes best_idx_ : array-like, shape = [n_predictions] Feature indices of the selected feature subsets. best_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representation of the feature array indices. New in v 0.13.0. best_score_ : float Cross validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the exhaustive selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representation of the feature array indices. The 'feature_names' is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ExhaustiveFeatureSelector/ Methods fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data and return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns Feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': list of CV average scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators.
Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Feature subset of X, shape={n_samples, k_features} SequentialFeatureSelector SequentialFeatureSelector(estimator, k_features=1, forward=True, floating=False, verbose=0, scoring=None, cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Sequential Feature Selection for Classification and Regression. Parameters estimator : scikit-learn classifier or regressor k_features : int or tuple or str (default: 1) Number of features to select, where k_features < the full feature set. New in 0.4.2: A tuple containing a min and max value can be provided, and the SFS will return the feature combination between min and max that scored highest in cross-validation. For example, the tuple (1, 4) will return any combination from 1 up to 4 features instead of a fixed number of features k. New in 0.8.0: A string argument \"best\" or \"parsimonious\". If \"best\" is provided, the feature selector will return the feature subset with the best cross-validation performance. If \"parsimonious\" is provided as an argument, the smallest feature subset that is within one standard error of the cross-validation performance will be selected. forward : bool (default: True) Forward selection if True, backward selection otherwise floating : bool (default: False) Adds a conditional exclusion/inclusion if True. verbose : int (default: 0) Level of verbosity to use in logging. If 0, no output; if 1, the number of features in the current set; if 2, detailed logging including timestamp and CV scores at each step. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. cv : int (default: 5) Integer or iterable yielding train, test splits. If cv is an integer and estimator is a classifier (or y consists of integer class labels), stratified k-fold cross-validation is performed; otherwise regular k-fold cross-validation is performed. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process.
This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1. Attributes k_feature_idx_ : array-like, shape = [n_predictions] Feature indices of the selected feature subsets. k_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representation of the feature array indices. New in v 0.13.0. k_score_ : float Cross validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the sequential selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representation of the feature array indices. The 'feature_names' is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/SequentialFeatureSelector/ Methods fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: pandas DataFrames are now also accepted as argument for y. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data then reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: a pandas Series is now also accepted as an argument for y. fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier.
Returns Reduced feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': list of CV average scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Reduced feature subset of X, shape={n_samples, k_features}","title":"Mlxtend.feature selection"},{"location":"api_subpackages/mlxtend.feature_selection/#columnselector","text":"ColumnSelector(cols=None, drop_axis=False) Object for selecting specific columns from a data set. Parameters cols : array-like (default: None) A list specifying the feature indices to be selected. For example, [1, 4, 5] to select the 2nd, 5th, and 6th feature columns. If None, returns all columns in the array. drop_axis : bool (default=False) Drops last axis if True and only one column is selected. This is useful, e.g., when the ColumnSelector is used for selecting only one column and the resulting array should be fed to, e.g., a scikit-learn column selector. E.g., instead of returning an array with shape (n_samples, 1), drop_axis=True will return an array with shape (n_samples,). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ColumnSelector/","title":"ColumnSelector"},{"location":"api_subpackages/mlxtend.feature_selection/#methods","text":"fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features get_params(deep=True) Get parameters for this estimator.
Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features","title":"Methods"},{"location":"api_subpackages/mlxtend.feature_selection/#exhaustivefeatureselector","text":"ExhaustiveFeatureSelector(estimator, min_features=1, max_features=1, print_progress=True, scoring='accuracy', cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Exhaustive Feature Selection for Classification and Regression. (new in v0.4.3) Parameters estimator : scikit-learn classifier or regressor min_features : int (default: 1) Minimum number of features to select max_features : int (default: 1) Maximum number of features to select print_progress : bool (default: True) Prints progress as the number of epochs to stderr. scoring : str (default='accuracy') Scoring metric in {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} for regressors, or a callable object or function with signature scorer(estimator, X, y) . cv : int (default: 5) Scikit-learn cross-validation generator or int . If estimator is a classifier (or y consists of integer class labels), stratified k-fold is performed, and regular k-fold cross-validation otherwise. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1. Attributes best_idx_ : array-like, shape = [n_predictions] Feature indices of the selected feature subsets. best_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representation of the feature array indices. New in v 0.13.0.
best_score_ : float Cross validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the exhaustive selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representation of the feature array indices. The 'feature_names' is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ExhaustiveFeatureSelector/","title":"ExhaustiveFeatureSelector"},{"location":"api_subpackages/mlxtend.feature_selection/#methods_1","text":"fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data and return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns Feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': list of CV average scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.
,{"location":"api_subpackages/mlxtend.feature_selection/#sequentialfeatureselector","text":"SequentialFeatureSelector(estimator, k_features=1, forward=True, floating=False, verbose=0, scoring=None, cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Sequential Feature Selection for Classification and Regression. Parameters estimator : scikit-learn classifier or regressor k_features : int or tuple or str (default: 1) Number of features to select, where k_features < the full feature set. New in 0.4.2: A tuple containing a min and max value can be provided, and the SFS will return any feature combination between min and max that scored highest in cross-validation. For example, the tuple (1, 4) will return any combination of between 1 and 4 features instead of a fixed number of features k. New in 0.8.0: A string argument \"best\" or \"parsimonious\". If \"best\" is provided, the feature selector will return the feature subset with the best cross-validation performance. If \"parsimonious\" is provided as an argument, the smallest feature subset that is within one standard error of the cross-validation performance will be selected. forward : bool (default: True) Forward selection if True, backward selection otherwise floating : bool (default: False) Adds a conditional exclusion/inclusion if True. verbose : int (default: 0) Level of verbosity to use in logging. If 0, no output; if 1, the number of features in the current set; if 2, detailed logging including timestamp and CV scores at each step. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y); see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. cv : int (default: 5) Integer or iterable yielding train, test splits. If cv is an integer and the estimator is a classifier (or y consists of integer class labels), stratified k-fold cross-validation is performed; otherwise, regular k-fold cross-validation is used. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned (use this for lightweight and fast-running jobs to avoid delays due to on-demand spawning of the jobs); an int, giving the exact number of total jobs that are spawned; or a string, giving an expression as a function of n_jobs, as in '2*n_jobs'. clone_estimator : bool (default: True) Clones the estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is then required to set cv=0 and n_jobs=1. Attributes k_feature_idx_ : array-like, shape = [n_predictions] Feature indices of the selected feature subsets. k_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. New in v 0.13.0. k_score_ : float Cross-validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the sequential selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset), 'feature_names' (tuple of feature names of the feat. subset), 'cv_scores' (list of individual cross-validation scores), 'avg_score' (average cross-validation score). Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. The 'feature_names' entry is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/SequentialFeatureSelector/","title":"SequentialFeatureSelector"}
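A minimal usage sketch for comparison with the exhaustive selector above (again, KNN and Iris are purely illustrative choices):

```python
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_iris
from mlxtend.feature_selection import SequentialFeatureSelector as SFS

X, y = load_iris(return_X_y=True)
knn = KNeighborsClassifier(n_neighbors=3)

# Forward selection of exactly 3 features; floating=True would enable SFFS
sfs = SFS(knn, k_features=3, forward=True, floating=False,
          scoring='accuracy', cv=5)
sfs = sfs.fit(X, y)

print('Selected indices:', sfs.k_feature_idx_)
print('CV score:', sfs.k_score_)
```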
,{"location":"api_subpackages/mlxtend.feature_selection/#methods_2","text":"fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: pandas DataFrames are now also accepted as argument for y. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names']. (new in v 0.13.0) **fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data, then reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: a pandas Series is now also accepted as argument for y. **fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier.
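Since fit accepts pandas DataFrames (new in v 0.13.0), k_feature_names_ can carry column names; a small sketch, with invented column names, assuming sfs and X from the previous example:

```python
import pandas as pd

X_df = pd.DataFrame(X, columns=['sepal_len', 'sepal_wid',
                                'petal_len', 'petal_wid'])
sfs = sfs.fit(X_df, y)
print('Selected columns:', sfs.k_feature_names_)

# transform() then returns only the selected feature columns
X_selected = sfs.transform(X_df)
print(X_selected.shape)
```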
Returns Reduced feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary. Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns A dictionary in which each value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset, 'cv_scores': list with individual CV scores, 'avg_score': list of average CV scores, 'std_dev': standard deviation of the CV score average, 'std_err': standard error of the CV score average, 'ci_bound': confidence interval bound of the CV score average. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Reduced feature subset of X, shape={n_samples, k_features}","title":"Methods"},{"location":"api_subpackages/mlxtend.file_io/","text":"mlxtend version: 0.14.0dev find_filegroups find_filegroups(paths, substring='', extensions=None, validity_check=True, ignore_invisible=True, rstrip='', ignore_substring=None) Find and collect files from different directories in a Python dictionary. Parameters paths : list Paths of the directories to be searched. Dictionary keys are built from the first directory. substring : str (default: '') Substring that all files have to contain to be considered. extensions : list (default: None) None or list of allowed file extensions for each path. If provided, the number of extensions must match the number of paths. validity_check : bool (default: True) If True, checks if all dictionary values have the same number of file paths. Prints a warning and returns an empty dictionary if the validity check failed. ignore_invisible : bool (default: True) If True, ignores invisible files (i.e., files starting with a period). rstrip : str (default: '') If provided, strips characters from the right side of the file base names after splitting the extension. Useful to trim different filenames to a common stem. E.g., \"abc_d.txt\" and \"abc_d_.csv\" would share the stem \"abc_d\" if rstrip is set to \"_\". ignore_substring : str (default: None) Ignores files that contain the specified substring. Returns groups : dict Dictionary of file paths. Keys are the file names found in the first directory listed in paths (without file extension). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_filegroups/
find_files find_files(substring, path, recursive=False, check_ext=None, ignore_invisible=True, ignore_substring=None) Find files in a directory based on substring matching. Parameters substring : str Substring of the file to be matched. path : str Path where to look. recursive : bool If True, searches subdirectories recursively. check_ext : str If string (e.g., '.txt'), only returns files that match the specified file extension. ignore_invisible : bool If True, ignores invisible files (i.e., files starting with a period). ignore_substring : str Ignores files that contain the specified substring. Returns results : list List of the matched files. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_files/","title":"Mlxtend.file io"}
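A quick sketch of both helpers; the directory layout below is a hypothetical placeholder:

```python
from mlxtend.file_io import find_files, find_filegroups

# All CSV files under a (hypothetical) ./logs directory containing 'run'
csv_files = find_files(substring='run', path='./logs',
                       recursive=True, check_ext='.csv')

# Group same-stem files living in two (hypothetical) sibling folders
groups = find_filegroups(paths=['./raw', './processed'],
                         substring='sample')
print(csv_files, groups)
```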
,{"location":"api_subpackages/mlxtend.frequent_patterns/","text":"mlxtend version: 0.14.0dev apriori apriori(df, min_support=0.5, use_colnames=False, max_len=None, n_jobs=1) Get frequent itemsets from a one-hot DataFrame Parameters df : pandas DataFrame or pandas SparseDataFrame pandas DataFrame in the encoded format. The allowed values are either 0/1 or True/False. For example:
Apple Bananas Beer Chicken Milk Rice
0 1 0 1 1 0 1
1 1 0 1 0 0 1
2 1 0 1 0 0 0
3 1 1 0 0 0 0
4 0 0 1 1 1 1
5 0 0 1 0 1 1
6 0 0 1 0 1 0
7 1 1 0 0 0 0
min_support : float (default: 0.5) A float between 0 and 1 for the minimum support of the itemsets returned. The support is computed as the fraction transactions_where_item(s)_occur / total_transactions. use_colnames : bool (default: False) If True, uses the DataFrames' column names in the returned DataFrame instead of column indices. max_len : int (default: None) Maximum length of the itemsets generated. If None (default), all possible itemset lengths (under the apriori condition) are evaluated. Returns pandas DataFrame with columns ['support', 'itemsets'] of all itemsets with support >= min_support and length < max_len (if max_len is not None). Each itemset in the 'itemsets' column is of type frozenset, which is a Python built-in type that behaves similarly to sets except that it is immutable (for more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/apriori/ association_rules association_rules(df, metric='confidence', min_threshold=0.8, support_only=False) Generates a DataFrame of association rules including the metrics 'support', 'confidence', and 'lift' Parameters df : pandas DataFrame pandas DataFrame of frequent itemsets with columns ['support', 'itemsets'] metric : string (default: 'confidence') Metric to evaluate if a rule is of interest. Automatically set to 'support' if support_only=True. Otherwise, supported metrics are 'support', 'confidence', 'lift', 'leverage', and 'conviction'. These metrics are computed as follows:
- support(A->C) = support(A+C) [aka 'support'], range: [0, 1]
- confidence(A->C) = support(A+C) / support(A), range: [0, 1]
- lift(A->C) = confidence(A->C) / support(C), range: [0, inf]
- leverage(A->C) = support(A->C) - support(A)*support(C), range: [-1, 1]
- conviction = [1 - support(C)] / [1 - confidence(A->C)], range: [0, inf]
min_threshold : float (default: 0.8) Minimal threshold for the evaluation metric, via the metric parameter, to decide whether a candidate rule is of interest. support_only : bool (default: False) Only computes the rule support and fills the other metric columns with NaNs. This is useful if: a) the input DataFrame is incomplete, e.g., does not contain support values for all rule antecedents and consequents; or b) you simply want to speed up the computation because you don't need the other metrics. Returns pandas DataFrame with columns \"antecedents\" and \"consequents\" that store itemsets, plus the scoring metric columns: \"antecedent support\", \"consequent support\", \"support\", \"confidence\", \"lift\", \"leverage\", \"conviction\" of all rules for which metric(rule) >= min_threshold. Each entry in the \"antecedents\" and \"consequents\" columns is of type frozenset, which is a Python built-in type that behaves similarly to sets except that it is immutable (for more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/association_rules/","title":"Mlxtend.frequent patterns"}
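The one-hot DataFrame shown in the example table above is usually produced with TransactionEncoder; a sketch of the full round trip (the dataset mirrors the example table; the min_support and min_threshold values are illustrative):

```python
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules

dataset = [['Apple', 'Beer', 'Rice', 'Chicken'],
           ['Apple', 'Beer', 'Rice'],
           ['Apple', 'Beer'],
           ['Apple', 'Bananas'],
           ['Milk', 'Beer', 'Rice', 'Chicken'],
           ['Milk', 'Beer', 'Rice'],
           ['Milk', 'Beer'],
           ['Apple', 'Bananas']]

# One-hot encode the transactions into the 0/1 DataFrame apriori expects
te = TransactionEncoder()
df = pd.DataFrame(te.fit(dataset).transform(dataset), columns=te.columns_)

frequent = apriori(df, min_support=0.6, use_colnames=True)
rules = association_rules(frequent, metric='confidence', min_threshold=0.7)
print(rules[['antecedents', 'consequents', 'support', 'confidence']])
```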
,{"location":"api_subpackages/mlxtend.image/","text":"mlxtend version: 0.14.0dev extract_face_landmarks extract_face_landmarks(img, return_dtype=np.int32) Function to extract face landmarks. Note that this function requires an installation of the Python version of the library \"dlib\": http://dlib.net Parameters img : array, shape = [h, w, ?] numpy array of a face image. Supported shapes are - 3D tensors with 1 or more color channels, for example, RGB: [h, w, 3] - 2D tensors without color channel, for example, Grayscale: [h, w] return_dtype : the return data-type of the array, default: np.int32. Returns landmarks : numpy.ndarray, shape = [68, 2] A numpy array, where each row contains the x-y coordinates of a landmark/point. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/sources/image/extract_face_landmarks.ipynb","title":"Mlxtend.image"},{"location":"api_subpackages/mlxtend.math/","text":"mlxtend version: 0.14.0dev factorial factorial(n) (no description available) num_combinations num_combinations(n, k, with_replacement=False) Function to calculate the number of possible combinations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool (default: False) Allows repeated elements if True. Returns comb : int Number of possible combinations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_combinations/ num_permutations num_permutations(n, k, with_replacement=False) Function to calculate the number of possible permutations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool Allows repeated elements if True. Returns permut : int Number of possible permutations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_permutations/
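As a concrete check of these counting helpers, with values computed from the standard formulas:

```python
from mlxtend.math import num_combinations, num_permutations

# 20 choose 8 without replacement: 20! / (8! * 12!) = 125970
print(num_combinations(n=20, k=8, with_replacement=False))

# Ordered arrangements: 20! / 12! = 5079110400
print(num_permutations(n=20, k=8, with_replacement=False))
```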
vectorspace_dimensionality vectorspace_dimensionality(ary) Computes the hyper-volume spanned by a vector set Parameters ary : array-like, shape=[num_vectors, num_vectors] An orthogonal set of vectors (arranged as columns in a matrix) Returns dimensions : int An integer indicating the \"dimensionality\" (hyper-volume) spanned by the vector set vectorspace_orthonormalization vectorspace_orthonormalization(ary, eps=1e-13) Transforms a set of column vectors to an orthonormal basis. Given a set of orthogonal vectors, this function converts such column vectors, arranged in a matrix, into orthonormal basis vectors. Parameters ary : array-like, shape=[num_vectors, num_vectors] An orthogonal set of vectors (arranged as columns in a matrix) eps : float (default: 1e-13) A small tolerance value to determine whether the vector norm is zero or not. Returns arr : array-like, shape=[num_vectors, num_vectors] An orthonormal set of vectors (arranged as columns)","title":"Mlxtend.math"}
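A minimal sketch of the orthonormalization helper; the 2x2 input matrix is an illustrative choice:

```python
import numpy as np
from mlxtend.math import vectorspace_orthonormalization

# Columns are orthogonal but not unit length; the result rescales
# each column to norm 1, yielding an orthonormal basis
ary = np.array([[3.0, 0.0],
                [0.0, 2.0]])
print(vectorspace_orthonormalization(ary))
# Expected (up to floating-point error): the 2x2 identity matrix
```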
,{"location":"api_subpackages/mlxtend.plotting/","text":"mlxtend version: 0.14.0dev category_scatter category_scatter(x, y, label_col, data, markers='sxo^v', colors=('blue', 'green', 'red', 'purple', 'gray', 'cyan'), alpha=0.7, markersize=20.0, legend_loc='best') Scatter plot to plot categories in different colors/markerstyles. Parameters x : str or int DataFrame column name of the x-axis values or integer for the numpy ndarray column index. y : str DataFrame column name of the y-axis values or integer for the numpy ndarray column index data : Pandas DataFrame object or NumPy ndarray. markers : str Markers that are cycled through the label category. colors : tuple Colors that are cycled through the label category. alpha : float (default: 0.7) Parameter to control the transparency. markersize : float (default: 20.0) Parameter to control the marker size. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/category_scatter/ checkerboard_plot checkerboard_plot(ary, cell_colors=('white', 'black'), font_colors=('black', 'white'), fmt='%.1f', figsize=None, row_labels=None, col_labels=None, fontsize=None) Plot a checkerboard table / heatmap via matplotlib. Parameters ary : array-like, shape = [n, m] A 2D NumPy array. cell_colors : tuple or list (default: ('white', 'black')) Tuple or list containing the two colors of the checkerboard pattern. font_colors : tuple or list (default: ('black', 'white')) Font colors corresponding to the cell colors. figsize : tuple (default: (2.5, 2.5)) Height and width of the figure fmt : str (default: '%.1f') Python string formatter for cell values. The default '%.1f' results in floats with 1 digit after the decimal point. Use '%d' to show numbers as integers. row_labels : list (default: None) List of the row labels. Uses the array row indices 0 to n by default. col_labels : list (default: None) List of the column labels. Uses the array column indices 0 to m by default. fontsize : int (default: None) Specifies the font size of the checkerboard table. Uses matplotlib's default if None. Returns fig : matplotlib Figure object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/checkerboard_plot/
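A short checkerboard_plot sketch (the labels and random data are illustrative):

```python
import numpy as np
import matplotlib.pyplot as plt
from mlxtend.plotting import checkerboard_plot

ary = np.random.random((5, 4))
brd = checkerboard_plot(ary,
                        col_labels=['abc', 'def', 'ghi', 'jkl'],
                        row_labels=['sample %d' % i for i in range(1, 6)])
plt.show()
```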
ecdf ecdf(x, y_label='ECDF', x_label=None, ax=None, percentile=None, ecdf_color=None, ecdf_marker='o', percentile_color='black', percentile_linestyle='--') Plots an Empirical Cumulative Distribution Function Parameters x : array or list, shape=[n_samples,] Array-like object containing the feature values y_label : str (default: 'ECDF') Text label for the y-axis x_label : str (default: None) Text label for the x-axis ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None percentile : float (default: None) Float between 0 and 1 for plotting a percentile threshold line ecdf_color : matplotlib color (default: None) Color for the ECDF plot; uses matplotlib defaults if None ecdf_marker : matplotlib marker (default: 'o') Marker style for the ECDF plot percentile_color : matplotlib color (default: 'black') Color for the percentile threshold if percentile is not None percentile_linestyle : matplotlib linestyle (default: '--') Line style for the percentile threshold if percentile is not None Returns ax : matplotlib.axes.Axes object percentile_threshold : float Feature threshold at the percentile, or None if percentile=None percentile_count : int or None Number of samples that have a feature value less than or equal to the feature threshold at the given percentile, or None if percentile=None Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/ecdf/ enrichment_plot enrichment_plot(df, colors='bgrkcy', markers=' ', linestyles='-', alpha=0.5, lw=2, where='post', grid=True, count_label='Count', xlim='auto', ylim='auto', invert_axes=False, legend_loc='best', ax=None) Plot an enrichment plot. Parameters df : pandas.DataFrame A pandas DataFrame where columns represent the different categories. colors : str (default: 'bgrkcy') The colors of the lines. markers : str (default: ' ') Matplotlib markerstyles, e.g., 'sov' for square, circle, and triangle markers. linestyles : str (default: '-') Matplotlib linestyles, e.g., '-,--' to cycle normal and dashed lines. Note that the different linestyles need to be separated by commas. alpha : float (default: 0.5) Transparency level from 0.0 to 1.0. lw : int or float (default: 2) Linewidth parameter. where : {'post', 'pre', 'mid'} (default: 'post') Starting location of the steps. grid : bool (default: True) Plots a grid if True. count_label : str (default: 'Count') Label for the \"Count\"-axis. xlim : 'auto' or array-like [min, max] (default: 'auto') Min and maximum position of the x-axis range. ylim : 'auto' or array-like [min, max] (default: 'auto') Min and maximum position of the y-axis range. invert_axes : bool (default: False) Plots count on the x-axis if True. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False ax : matplotlib axis, optional (default: None) Use this axis for plotting or make a new one otherwise Returns ax : matplotlib axis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/enrichment_plot/
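A short ecdf sketch showing all three return values (the random data is illustrative):

```python
import numpy as np
import matplotlib.pyplot as plt
from mlxtend.plotting import ecdf

x = np.random.randn(500)
ax, threshold, count = ecdf(x, x_label='value', percentile=0.8)
print('80th-percentile threshold: %.3f (%d samples at or below it)'
      % (threshold, count))
plt.show()
```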
plot_confusion_matrix plot_confusion_matrix(conf_mat, hide_spines=False, hide_ticks=False, figsize=None, cmap=None, colorbar=False, show_absolute=True, show_normed=False) Plot a confusion matrix via matplotlib. Parameters conf_mat : array-like, shape = [n_classes, n_classes] Confusion matrix from evaluate.confusion_matrix. hide_spines : bool (default: False) Hides axis spines if True. hide_ticks : bool (default: False) Hides axis ticks if True figsize : tuple (default: (2.5, 2.5)) Height and width of the figure cmap : matplotlib colormap (default: None) Uses matplotlib.pyplot.cm.Blues if None colorbar : bool (default: False) Shows a colorbar if True show_absolute : bool (default: True) Shows absolute confusion matrix coefficients if True. At least one of show_absolute or show_normed must be True. show_normed : bool (default: False) Shows normed confusion matrix coefficients if True. The normed confusion matrix coefficients give the proportion of training examples per class that are assigned the correct label. At least one of show_absolute or show_normed must be True. Returns fig, ax : matplotlib.pyplot subplot objects Figure and axis elements of the subplot. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_confusion_matrix/ plot_decision_regions plot_decision_regions(X, y, clf, feature_index=None, filler_feature_values=None, filler_feature_ranges=None, ax=None, X_highlight=None, res=None, legend=1, hide_spines=True, markers='s^oxv<>', colors='#1f77b4,#ff7f0e,#3ca02c,#d62728,#9467bd,#8c564b,#e377c2,#7f7f7f,#bcbd22,#17becf', scatter_kwargs=None, contourf_kwargs=None, scatter_highlight_kwargs=None) Plot decision regions of a classifier. Please note that this function assumes that class labels are labeled consecutively, e.g., 0, 1, 2, 3, 4, and 5. If you have class labels with integer labels > 4, you may want to provide additional colors and/or markers as colors and markers arguments. See http://matplotlib.org/examples/color/named_colors.html for more information. Parameters X : array-like, shape = [n_samples, n_features] Feature Matrix. y : array-like, shape = [n_samples] True class labels. clf : Classifier object. Must have a .predict method. feature_index : array-like (default: (0,) for 1D, (0, 1) otherwise) Feature indices to use for plotting. The first index in feature_index will be on the x-axis, the second index will be on the y-axis. filler_feature_values : dict (default: None) Only needed for number of features > 2. Dictionary of feature index-value pairs for the features not being plotted. filler_feature_ranges : dict (default: None) Only needed for number of features > 2. Dictionary of feature index-value pairs for the features not being plotted. Will use the ranges provided to select training samples for plotting. ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None. X_highlight : array-like, shape = [n_samples, n_features] (default: None) An array with data points that are used to highlight samples in X. res : float or array-like, shape = (2,) (default: None) This parameter was used to define the grid width, but it has been deprecated in favor of determining the number of points given the figure DPI and size automatically for optimal results and computational efficiency. To increase the resolution, it is recommended to provide a dpi argument via matplotlib, e.g., plt.figure(dpi=600). hide_spines : bool (default: True) Hide axis spines if True. legend : int (default: 1) Integer to specify the legend location. No legend if legend is 0. markers : str (default: 's^oxv<>') Scatterplot markers. colors : str (default: see the signature above) Comma separated list of colors. scatter_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function. contourf_kwargs : dict (default: None) Keyword arguments for underlying matplotlib contourf function. scatter_highlight_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function. Returns ax : matplotlib.axes.Axes object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_decision_regions/
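A minimal plot_decision_regions sketch; the two-feature Iris/SVM setup is an illustrative choice:

```python
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.svm import SVC
from mlxtend.plotting import plot_decision_regions

# Keep only two features so the regions can be drawn in 2D
X, y = datasets.load_iris(return_X_y=True)
X = X[:, [0, 2]]

clf = SVC(gamma='auto').fit(X, y)
plot_decision_regions(X, y, clf=clf, legend=2)
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.show()
```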
plot_learning_curves plot_learning_curves(X_train, y_train, X_test, y_test, clf, train_marker='o', test_marker='^', scoring='misclassification error', suppress_plot=False, print_model=True, style='fivethirtyeight', legend_loc='best') Plots learning curves of a classifier. Parameters X_train : array-like, shape = [n_samples, n_features] Feature matrix of the training dataset. y_train : array-like, shape = [n_samples] True class labels of the training dataset. X_test : array-like, shape = [n_samples, n_features] Feature matrix of the test dataset. y_test : array-like, shape = [n_samples] True class labels of the test dataset. clf : Classifier object. Must have .fit and .predict methods. train_marker : str (default: 'o') Marker for the training set line plot. test_marker : str (default: '^') Marker for the test set line plot. scoring : str (default: 'misclassification error') If not 'misclassification error', accepts the following metrics (from scikit-learn): {'accuracy', 'average_precision', 'f1_micro', 'f1_macro', 'f1_weighted', 'f1_samples', 'log_loss', 'precision', 'recall', 'roc_auc', 'adjusted_rand_score', 'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} suppress_plot : bool (default: False) Suppress matplotlib plots if True. Recommended for testing purposes. print_model : bool (default: True) Print model parameters in plot title if True. style : str (default: 'fivethirtyeight') Matplotlib style legend_loc : str (default: 'best') Where to place the plot legend: {'best', 'upper left', 'upper right', 'lower left', 'lower right'} Returns errors : (training_error, test_error): tuple of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_learning_curves/ plot_linear_regression plot_linear_regression(X, y, model=LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False), corr_func='pearsonr', scattercolor='blue', fit_style='k--', legend=True, xlim='auto') Plot a linear regression line fit. Parameters X : numpy array, shape = [n_samples,] Samples. y : numpy array, shape (n_samples,) Target values model : object (default: sklearn.linear_model.LinearRegression) Estimator object for regression. Must implement a .fit() and .predict() method. corr_func : str or function (default: 'pearsonr') Uses pearsonr from scipy.stats if corr_func='pearsonr' to compute the regression slope. If not 'pearsonr', the corr_func parameter expects a function of the form func(x, y) as input, which is expected to return a tuple (correlation_coefficient, p_value). scattercolor : string (default: 'blue') Color of scatter plot points. fit_style : string (default: 'k--') Style for the line fit. legend : bool (default: True) Plots a legend with the correlation coefficient, fit coefficient, and intercept values. xlim : array-like (x_min, x_max) or 'auto' (default: 'auto') X-axis limits for the linear fit line. Returns regression_fit : tuple intercept, slope, corr_coeff (float, float, float) Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_linear_regression/
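A minimal plot_linear_regression sketch; the data points are illustrative:

```python
import numpy as np
import matplotlib.pyplot as plt
from mlxtend.plotting import plot_linear_regression

X = np.array([4, 8, 13, 26, 31, 10, 8, 30, 18, 12, 20, 5, 28, 18, 6])
y = np.array([14, 24, 22, 59, 66, 25, 18, 60, 39, 32, 53, 18, 55, 41, 17])

# Returns the fitted intercept, slope, and correlation coefficient
intercept, slope, corr_coeff = plot_linear_regression(X, y)
plt.show()
```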
plot_sequential_feature_selection plot_sequential_feature_selection(metric_dict, kind='std_dev', color='blue', bcolor='steelblue', marker='o', alpha=0.2, ylabel='Performance', confidence_interval=0.95) Plot feature selection results. Parameters metric_dict : mlxtend.SequentialFeatureSelector.get_metric_dict() object kind : str (default: \"std_dev\") The kind of error bar or confidence interval in {'std_dev', 'std_err', 'ci', None}. color : str (default: \"blue\") Color of the lineplot (accepts any matplotlib color name) bcolor : str (default: \"steelblue\") Color of the error bars / confidence intervals (accepts any matplotlib color name). marker : str (default: \"o\") Marker of the line plot (accepts any matplotlib marker name). alpha : float in [0, 1] (default: 0.2) Transparency of the error bars / confidence intervals. ylabel : str (default: \"Performance\") Y-axis label. confidence_interval : float (default: 0.95) Confidence level if kind='ci'. Returns fig : matplotlib.pyplot.figure() object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_sequential_feature_selection/ remove_borders remove_borders(axes, left=False, bottom=False, right=True, top=True) Remove chart junk from matplotlib plots. Parameters axes : iterable An iterable containing plt.gca() or plt.subplot() objects, e.g. [plt.gca()]. left : bool (default: False) Hide left axis spine if True. bottom : bool (default: False) Hide bottom axis spine if True. right : bool (default: True) Hide right axis spine if True. top : bool (default: True) Hide top axis spine if True. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/remove_chartjunk/ scatterplotmatrix scatterplotmatrix(X, fig_axes=None, names=None, figsize=(8, 8), alpha=1.0, **kwargs) Lower triangle of a scatterplot matrix. Parameters X : array-like, shape={num_examples, num_features} Design matrix containing data instances (examples) with multiple exploratory variables (features). fig_axes : tuple (default: None) A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...) names : list (default: None) A list of string names, which should have the same number of elements as there are features (columns) in X. figsize : tuple (default: (8, 8)) Height and width of the subplot grid. Ignored if fig_axes is not None. alpha : float (default: 1.0) Transparency for both the scatter plots and the histograms along the diagonal. **kwargs : kwargs Keyword arguments for the scatterplots. Returns fig_axes : tuple A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...) stacked_barplot stacked_barplot(df, bar_width='auto', colors='bgrcky', labels='index', rotation=90, legend_loc='best') Function to plot stacked barplots Parameters df : pandas.DataFrame A pandas DataFrame where the index denotes the x-axis labels, and the columns contain the different measurements for each row. bar_width : 'auto' or float (default: 'auto') Parameter to set the widths of the bars. If 'auto', the width is automatically determined by the number of columns in the dataset. colors : str (default: 'bgrcky') The colors of the bars. labels : 'index' or iterable (default: 'index') If 'index', the DataFrame index will be used as x-tick labels. rotation : int (default: 90) Parameter to rotate the x-axis labels. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/stacked_barplot/","title":"Mlxtend.plotting"}
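plot_sequential_feature_selection pairs naturally with SequentialFeatureSelector.get_metric_dict(); a sketch assuming the fitted sfs object from the feature_selection section above:

```python
import matplotlib.pyplot as plt
from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs

# One point per subset size, with standard-deviation error bars
fig = plot_sfs(sfs.get_metric_dict(), kind='std_dev')
plt.title('Sequential Forward Selection (w. StdDev)')
plt.grid()
plt.show()
```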
Returns fig_axes : tuple A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...)","title":"scatterplotmatrix"},{"location":"api_subpackages/mlxtend.plotting/#stacked_barplot","text":"stacked_barplot(df, bar_width='auto', colors='bgrcky', labels='index', rotation=90, legend_loc='best') Function to plot stacked barplots. Parameters df : pandas.DataFrame A pandas DataFrame where the index denotes the x-axis labels, and the columns contain the different measurements for each row. bar_width : 'auto' or float (default: 'auto') Parameter to set the widths of the bars. If 'auto', the width is automatically determined by the number of columns in the dataset. colors : str (default: 'bgrcky') The colors of the bars. labels : 'index' or iterable (default: 'index') If 'index', the DataFrame index will be used as x-tick labels. rotation : int (default: 90) Parameter to rotate the x-axis labels. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right}. No legend if legend_loc=False. Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/stacked_barplot/","title":"stacked_barplot"},{"location":"api_subpackages/mlxtend.preprocessing/","text":"mlxtend version: 0.14.0dev CopyTransformer CopyTransformer() Transformer that returns a copy of the input array. For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/CopyTransformer/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array.
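Both CopyTransformer and the DenseTransformer documented next exist mainly as scikit-learn pipeline glue. A hedged sketch, assuming a toy text-classification pipeline (the documents and RandomForest choice are illustrative, not from this reference):

```python
# Sketch: DenseTransformer bridges a sparse vectorizer and an
# estimator that expects dense input; CopyTransformer works the
# same way but simply passes a copy through.
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import make_pipeline
from mlxtend.preprocessing import DenseTransformer

docs = ['the quick brown fox', 'jumped over the lazy dog',
        'the dog barked', 'the fox ran away']
labels = [0, 1, 1, 0]

pipe = make_pipeline(
    CountVectorizer(),       # emits a sparse matrix
    DenseTransformer(),      # densifies it for the estimator
    RandomForestClassifier(n_estimators=10, random_state=1))
pipe.fit(docs, labels)
print(pipe.predict(['the quick dog']))
```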
DenseTransformer DenseTransformer(return_copy=True) Convert a sparse array into a dense array. For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/DenseTransformer/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array. MeanCenterer MeanCenterer() Column centering of vectors and matrices. Attributes col_means : numpy.ndarray [n_columns] NumPy array storing the mean values for centering after fitting the MeanCenterer object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/MeanCenterer/ Methods fit(X) Gets the column means for mean centering. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns self fit_transform(X) Fits and transforms an array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered. transform(X) Centers a NumPy array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered. OnehotTransactions OnehotTransactions(*args, **kwargs) Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/ Methods fit(X) Learn unique column names from transaction DataFrame Parameters X : list of lists A Python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator.
Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions. Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. 
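A usage sketch of the transform/inverse_transform round trip just described; the items mirror the docstring's own example:

```python
# Sketch: one-hot encode transactions, then invert the encoding.
import pandas as pd
from mlxtend.preprocessing import OnehotTransactions

transactions = [['Apple', 'Beer', 'Rice', 'Chicken'],
                ['Apple', 'Beer', 'Rice'],
                ['Milk', 'Beer', 'Rice', 'Chicken'],
                ['Apple', 'Bananas']]

oht = OnehotTransactions()
ary = oht.fit(transactions).transform(transactions)

# Column labels live in oht.columns_, in alphabetic order
df = pd.DataFrame(ary, columns=oht.columns_)
print(df)

# inverse_transform recovers the original item lists
print(oht.inverse_transform(ary))
```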
Exact representation depends on the sparse argument For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] TransactionEncoder TransactionEncoder() Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/ Methods fit(X) Learn unique column names from transaction DataFrame Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions. Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. 
For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. Exact representation depends on the sparse argument For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] minmax_scaling minmax_scaling(array, columns, min_val=0, max_val=1) Min max scaling of pandas' DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] min_val : int or float , optional (default= 0 ) minimum value after rescaling. max_val : int or float , optional (default= 1 ) maximum value after rescaling. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with rescaled columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/minmax_scaling/ one_hot one_hot(y, num_labels='auto', dtype='float') One-hot encoding of class labels Parameters y : array-like, shape = [n_classlabels] Python list or numpy array consisting of class labels. num_labels : int or 'auto' Number of unique labels in the class label array. Infers the number of unique labels from the input array if set to 'auto'. dtype : str NumPy array type (float, float32, float64) of the output array. Returns ary : numpy.ndarray, shape = [n_classlabels] One-hot encoded array, where each sample is represented as a row vector in the returned array. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/one_hot/ shuffle_arrays_unison shuffle_arrays_unison(arrays, random_seed=None) Shuffle NumPy arrays in unison. Parameters arrays : array-like, shape = [n_arrays] A list of NumPy arrays. random_seed : int (default: None) Sets the random state. Returns shuffled_arrays : A list of NumPy arrays after shuffling. Examples >>> import numpy as np >>> from mlxtend.preprocessing import shuffle_arrays_unison >>> X1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> y1 = np.array([1, 2, 3]) >>> X2, y2 = shuffle_arrays_unison(arrays=[X1, y1], random_seed=3) >>> assert(X2.all() == np.array([[4, 5, 6], [1, 2, 3], [7, 8, 9]]).all()) >>> assert(y2.all() == np.array([2, 1, 3]).all()) >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/shuffle_arrays_unison/ standardize standardize(array, columns=None, ddof=0, return_params=False, params=None) Standardize columns in pandas DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. 
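As a quick illustration of the one_hot function documented above (the label array is arbitrary):

```python
# Sketch: integer class labels to one-hot row vectors.
import numpy as np
from mlxtend.preprocessing import one_hot

y = np.array([0, 1, 2, 1])
print(one_hot(y, num_labels='auto'))
# -> [[1. 0. 0.]
#     [0. 1. 0.]
#     [0. 0. 1.]
#     [0. 1. 0.]]
```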
columns : array-like, shape = [n_columns] (default: None) Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] If None, standardizes all columns. ddof : int (default: 0) Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. return_params : bool (default: False) If set to True, a dictionary is returned in addition to the standardized array. The parameter dictionary contains the column means ('avgs') and standard deviations ('stds') of the individual columns. params : dict (default: None) A dictionary with column means and standard deviations as returned by the standardize function if return_params was set to True. If a params dictionary is provided, the standardize function will use these instead of computing them from the current array. Notes If all values in a given column are the same, these values are all set to 0.0 . The standard deviation in the parameters dictionary is consequently set to 1.0 to avoid dividing by zero. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with standardized columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/standardize/","title":"Mlxtend.preprocessing"},{"location":"api_subpackages/mlxtend.preprocessing/#copytransformer","text":"CopyTransformer() Transformer that returns a copy of the input array. For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/CopyTransformer/","title":"CopyTransformer"},{"location":"api_subpackages/mlxtend.preprocessing/#methods","text":"fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array.","title":"Methods"},{"location":"api_subpackages/mlxtend.preprocessing/#densetransformer","text":"DenseTransformer(return_copy=True) Convert a sparse array into a dense array. For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/DenseTransformer/","title":"DenseTransformer"},{"location":"api_subpackages/mlxtend.preprocessing/#methods_1","text":"fit(X, y=None) Mock method. Does nothing.
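To make the return_params/params pairing of standardize concrete, a sketch of fitting scaling parameters on training data and reusing them on new data (column names are illustrative):

```python
# Sketch: reusing training-set means/stds to standardize new data.
import pandas as pd
from mlxtend.preprocessing import standardize

df_train = pd.DataFrame({'s1': [1, 2, 3], 's2': [10, 20, 30]})
df_test = pd.DataFrame({'s1': [4, 5], 's2': [40, 50]})

# Fit on training data and capture the column means ('avgs')
# and standard deviations ('stds')
df_train_std, params = standardize(df_train, columns=['s1', 's2'],
                                   return_params=True)
# Apply the *training* parameters to the test data
df_test_std = standardize(df_test, columns=['s1', 's2'], params=params)
print(df_test_std)
```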
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array.","title":"Methods"},{"location":"api_subpackages/mlxtend.preprocessing/#meancenterer","text":"MeanCenterer() Column centering of vectors and matrices. Attributes col_means : numpy.ndarray [n_columns] NumPy array storing the mean values for centering after fitting the MeanCenterer object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/MeanCenterer/","title":"MeanCenterer"},{"location":"api_subpackages/mlxtend.preprocessing/#methods_2","text":"fit(X) Gets the column means for mean centering. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns self fit_transform(X) Fits and transforms an arry. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered. transform(X) Centers a NumPy array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. 
Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered.","title":"Methods"},{"location":"api_subpackages/mlxtend.preprocessing/#onehottransactions","text":"OnehotTransactions( args, * kwargs) Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/","title":"OnehotTransactions"},{"location":"api_subpackages/mlxtend.preprocessing/#methods_3","text":"fit(X) Learn unique column names from transaction DataFrame Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions. Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). 
Compressed Sparse Row matrix otherwise The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. Exact representation depends on the sparse argument For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice']","title":"Methods"},{"location":"api_subpackages/mlxtend.preprocessing/#transactionencoder","text":"TransactionEncoder() Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/","title":"TransactionEncoder"},{"location":"api_subpackages/mlxtend.preprocessing/#methods_4","text":"fit(X) Learn unique column names from transaction DataFrame Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions. Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. 
Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. Exact representation depends on the sparse argument For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice']","title":"Methods"},{"location":"api_subpackages/mlxtend.preprocessing/#minmax_scaling","text":"minmax_scaling(array, columns, min_val=0, max_val=1) Min max scaling of pandas' DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] min_val : int or float , optional (default= 0 ) minimum value after rescaling. max_val : int or float , optional (default= 1 ) maximum value after rescaling. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with rescaled columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/minmax_scaling/","title":"minmax_scaling"},{"location":"api_subpackages/mlxtend.preprocessing/#one_hot","text":"one_hot(y, num_labels='auto', dtype='float') One-hot encoding of class labels Parameters y : array-like, shape = [n_classlabels] Python list or numpy array consisting of class labels. num_labels : int or 'auto' Number of unique labels in the class label array. Infers the number of unique labels from the input array if set to 'auto'. dtype : str NumPy array type (float, float32, float64) of the output array. Returns ary : numpy.ndarray, shape = [n_classlabels] One-hot encoded array, where each sample is represented as a row vector in the returned array. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/one_hot/","title":"one_hot"},{"location":"api_subpackages/mlxtend.preprocessing/#shuffle_arrays_unison","text":"shuffle_arrays_unison(arrays, random_seed=None) Shuffle NumPy arrays in unison. Parameters arrays : array-like, shape = [n_arrays] A list of NumPy arrays. random_seed : int (default: None) Sets the random state. Returns shuffled_arrays : A list of NumPy arrays after shuffling. 
Examples >>> import numpy as np >>> from mlxtend.preprocessing import shuffle_arrays_unison >>> X1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> y1 = np.array([1, 2, 3]) >>> X2, y2 = shuffle_arrays_unison(arrays=[X1, y1], random_seed=3) >>> assert(X2.all() == np.array([[4, 5, 6], [1, 2, 3], [7, 8, 9]]).all()) >>> assert(y2.all() == np.array([2, 1, 3]).all()) >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/shuffle_arrays_unison/","title":"shuffle_arrays_unison"},{"location":"api_subpackages/mlxtend.preprocessing/#standardize","text":"standardize(array, columns=None, ddof=0, return_params=False, params=None) Standardize columns in pandas DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] (default: None) Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] If None, standardizes all columns. ddof : int (default: 0) Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. return_params : dict (default: False) If set to True, a dictionary is returned in addition to the standardized array. The parameter dictionary contains the column means ('avgs') and standard deviations ('stds') of the individual columns. params : dict (default: None) A dictionary with column means and standard deviations as returned by the standardize function if return_params was set to True. If a params dictionary is provided, the standardize function will use these instead of computing them from the current array. Notes If all values in a given column are the same, these values are all set to 0.0 . The standard deviation in the parameters dictionary is consequently set to 1.0 to avoid dividing by zero. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with standardized columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/standardize/","title":"standardize"},{"location":"api_subpackages/mlxtend.regressor/","text":"mlxtend version: 0.14.0dev LinearRegression LinearRegression(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) Ordinary least squares linear regression. Parameters eta : float (default: 0.01) solver rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent learning If 1 < minibatches < len(y): Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch; ignored if solver='normal equation' Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/LinearRegression/ Methods fit(X, y, init_params=True) Learn model from training data. 
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. (Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py; Author: Gael Varoquaux gael.varoquaux@normalesup.org; License: BSD 3 clause.) predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self (Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py; Author: Gael Varoquaux gael.varoquaux@normalesup.org; License: BSD 3 clause.) StackingCVRegressor StackingCVRegressor(regressors, meta_regressor, cv=5, shuffle=True, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A 'Stacking Cross-Validation' regressor for scikit-learn estimators. New in mlxtend v0.7.0 Notes The StackingCVRegressor uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus, NumPy's random seed needs to be specified explicitly for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVRegressor. Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingCVRegressor will fit clones of these original regressors that will be stored in the class attribute self.regr_ . meta_regressor : object The meta-regressor to be fitted on the ensemble of regressors. cv : int, cross-validation generator or iterable, optional (default: 5) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - integer, to specify the number of folds in a KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use KFold cross-validation. use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. shuffle : bool (default: True) If True, and the cv argument is an integer, the training data will be shuffled at fitting stage prior to cross-validation. If the cv argument is a specific cross validation technique, this argument is ignored.
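The remaining StackingCVRegressor parameters (store_train_meta_features, refit) are described just below; first, a minimal usage sketch, with the base and meta regressors as illustrative choices:

```python
# Sketch: a StackingCVRegressor with two base regressors and a
# random-forest meta-regressor.
import numpy as np
from sklearn.linear_model import Lasso, Ridge
from sklearn.ensemble import RandomForestRegressor
from mlxtend.regressor import StackingCVRegressor

# Per the Notes above: check_cv has no seed of its own,
# so fix NumPy's global seed for reproducible folds.
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)

X = np.random.rand(100, 5)
y = np.random.rand(100)

stack = StackingCVRegressor(
    regressors=(Lasso(), Ridge()),
    meta_regressor=RandomForestRegressor(n_estimators=10,
                                         random_state=RANDOM_SEED),
    cv=5)
stack.fit(X, y)
print(stack.score(X, y))
```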
store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor are stored in the self.train_meta_features_ array, which can be accessed after calling fit . refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Attributes train_meta_features : numpy array, shape = [n_samples, n_regressors] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingCVRegressor/ Methods fit(X, y, groups=None, sample_weight=None) Fit ensemble regressors and the meta-regressor. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold(). sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises an error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters **fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) ** 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) ** 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse).
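In equation form, with u and v as just defined:

$$R^2 = 1 - \frac{u}{v} = 1 - \frac{\sum_{i}\left(y_i - \hat{y}_i\right)^2}{\sum_{i}\left(y_i - \bar{y}\right)^2}$$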
A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples. For some estimators this may be a precomputed kernel matrix instead, shape = (n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for the estimator. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) wrt. y. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self StackingRegressor StackingRegressor(regressors, meta_regressor, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A Stacking regressor for scikit-learn estimators for regression. Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingRegressor will fit clones of those original regressors that will be stored in the class attribute self.regr_ . meta_regressor : object The meta-regressor to be fitted on the ensemble of regressors verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the regressor being fitted - verbose=2 : Prints info about the parameters of the regressor being fitted - verbose>2 : Changes verbose param of the underlying regressor to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor stored in the self.train_meta_features_ array, which can be accessed after calling fit . Attributes regr_ : list, shape=[n_regressors] Fitted regressors (clones of the original regressors) meta_regr_ : estimator Fitted meta-regressor (clone of the original meta-estimator) coef_ : array-like, shape = [n_features] Model coefficients of the fitted meta-estimator intercept_ : float Intercept of the fitted meta-estimator train_meta_features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that are supporting the scikit-learn fit/predict API interface but are not compatible to scikit-learn's clone function. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingRegressor/ Methods fit(X, y, sample_weight=None) Learn weight coefficients from training data for each regressor. 
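Before fit's parameters, a minimal StackingRegressor sketch; the SVR/LinearRegression combination is an illustrative choice, not from this reference:

```python
# Sketch: stacking two base regressors under an RBF-SVR meta-model.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from mlxtend.regressor import StackingRegressor

X = np.sort(5 * np.random.rand(60, 1), axis=0)
y = np.sin(X).ravel()

stregr = StackingRegressor(
    regressors=[SVR(kernel='linear'), LinearRegression()],
    meta_regressor=SVR(kernel='rbf'))
stregr.fit(X, y)
print(stregr.predict(X[:3]))
# coef_ / intercept_ are exposed only when the fitted
# meta-regressor itself provides them.
```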
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples. For some estimators this may be a precomputed kernel matrix instead, shape = (n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for the estimator. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) wrt. y. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self Properties coef_ None intercept_ None","title":"Mlxtend.regressor"},{"location":"api_subpackages/mlxtend.regressor/#linearregression","text":"LinearRegression(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) Ordinary least squares linear regression. Parameters eta : float (default: 0.01) solver rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. 
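A short sketch of the closed-form solver selected by the minibatches parameter described below (the data are illustrative):

```python
# Sketch: ordinary least squares via mlxtend's LinearRegression.
import numpy as np
from mlxtend.regressor import LinearRegression

X = np.array([[1.0], [2.0], [3.0], [4.0], [5.0]])
y = np.array([1.0, 2.1, 2.9, 4.2, 5.1])

# minibatches=None -> normal equations (closed form);
# eta/epochs only matter for the gradient-descent modes
ols = LinearRegression(minibatches=None)
ols.fit(X, y)
print('Intercept:', ols.b_)      # bias unit, shape (1,)
print('Slope:', ols.w_.flatten())
print(ols.predict(np.array([[6.0]])))
```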
minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent learning If 1 < minibatches < len(y): Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch; ignored if solver='normal equation' Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/LinearRegression/","title":"LinearRegression"},{"location":"api_subpackages/mlxtend.regressor/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. (Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py; Author: Gael Varoquaux gael.varoquaux@normalesup.org; License: BSD 3 clause.) predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.
Returns self (adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py)","title":"Methods"},{"location":"api_subpackages/mlxtend.regressor/#stackingcvregressor","text":"StackingCVRegressor(regressors, meta_regressor, cv=5, shuffle=True, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A 'Stacking Cross-Validation' regressor for scikit-learn estimators. New in mlxtend v0.7.0 Notes The StackingCVRegressor uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus, NumPy's random seed needs to be specified explicitly for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVRegressor. Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingCVRegressor will fit clones of these original regressors that will be stored in the class attribute self.regr_ . meta_regressor : object The meta-regressor to be fitted on the ensemble of regressors. cv : int, cross-validation generator or iterable, optional (default: 5) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - integer, to specify the number of folds in a KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use KFold cross-validation. use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. shuffle : bool (default: True) If True, and the cv argument is an integer, the training data will be shuffled at fitting stage prior to cross-validation. If the cv argument is a specific cross validation technique, this argument is ignored. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor are stored in the self.train_meta_features_ array, which can be accessed after calling fit . refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Attributes train_meta_features : numpy array, shape = [n_samples, n_regressors] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingCVRegressor/","title":"StackingCVRegressor"},{"location":"api_subpackages/mlxtend.regressor/#methods_1","text":"fit(X, y, groups=None, sample_weight=None) Fit ensemble regressors and the meta-regressor.
Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples. For some estimators this may be a precomputed kernel matrix instead, shape = (n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for the estimator. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) wrt. y. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. 
Returns self","title":"Methods"},{"location":"api_subpackages/mlxtend.regressor/#stackingregressor","text":"StackingRegressor(regressors, meta_regressor, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A Stacking regressor for scikit-learn estimators for regression. Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingRegressor will fit clones of those original regressors that will be stored in the class attribute self.regr_ . meta_regressor : object The meta-regressor to be fitted on the ensemble of regressors verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the regressor being fitted - verbose=2 : Prints info about the parameters of the regressor being fitted - verbose>2 : Changes verbose param of the underlying regressor to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor stored in the self.train_meta_features_ array, which can be accessed after calling fit . Attributes regr_ : list, shape=[n_regressors] Fitted regressors (clones of the original regressors) meta_regr_ : estimator Fitted meta-regressor (clone of the original meta-estimator) coef_ : array-like, shape = [n_features] Model coefficients of the fitted meta-estimator intercept_ : float Intercept of the fitted meta-estimator train_meta_features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that are supporting the scikit-learn fit/predict API interface but are not compatible to scikit-learn's clone function. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingRegressor/","title":"StackingRegressor"},{"location":"api_subpackages/mlxtend.regressor/#methods_2","text":"fit(X, y, sample_weight=None) Learn weight coefficients from training data for each regressor. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. 
Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) ** 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) ** 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get an R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples. For some estimators this may be a precomputed kernel matrix instead, shape = (n_samples, n_samples_fitted), where n_samples_fitted is the number of samples used in the fitting for the estimator. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"Methods"},{"location":"api_subpackages/mlxtend.regressor/#properties","text":"coef_ None intercept_ None","title":"Properties"},{"location":"api_subpackages/mlxtend.text/","text":"mlxtend version: 0.14.0dev generalize_names generalize_names(name, output_sep=' ', firstname_output_letters=1) Generalize a person's first and last name. Returns a person's name in the format <last_name><separator><first name letter(s)> (all lowercase). Parameters name : str Name of the player output_sep : str (default: ' ') String for separating last name and first name in the output. firstname_output_letters : int Number of letters in the abbreviated first name. Returns gen_name : str The generalized name. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names/ generalize_names_duplcheck generalize_names_duplcheck(df, col_name) Generalizes names and removes duplicates. Applies mlxtend.text.generalize_names to a DataFrame with 1 first name letter by default and uses more first name letters if duplicates are detected. Parameters df : pandas.DataFrame DataFrame that contains a column where generalize_names should be applied. col_name : str Name of the DataFrame column to which the generalize_names function should be applied. Returns df_new : pandas.DataFrame New DataFrame object where the generalize_names function has been applied without duplicates.
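As a rough illustration of the two text functions just described (a minimal sketch; the input names and the collision fallback shown in the comments are assumptions based on the descriptions above):
import pandas as pd
from mlxtend.text import generalize_names, generalize_names_duplcheck
# single name: last name first, abbreviated first name, all lowercase
print(generalize_names("Samuel Eto'o"))  # expected: 'etoo s'
# hypothetical DataFrame where two names would collide under 1 first-name letter
df = pd.DataFrame({'name': ['Tim Miller', 'Tom Miller']})  # both -> 'miller t'
# duplicates are detected, so more first-name letters should be used automatically
df_new = generalize_names_duplcheck(df, col_name='name')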
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names_duplcheck/ tokenizer_emoticons tokenizer_emoticons(text) Return emoticons from text Examples >>> tokenizer_emoticons('This :) is :( a test :-)!') [':)', ':(', ':-)'] For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_emoticons/ tokenizer_words_and_emoticons tokenizer_words_and_emoticons(text) Convert text to lowercase words and emoticons. Examples >>> tokenizer_words_and_emoticons('This :) is :( a test :-)!') ['this', 'is', 'a', 'test', ':)', ':(', ':-)'] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_words_and_emoticons/","title":"Mlxtend.text"},{"location":"api_subpackages/mlxtend.text/#generalize_names","text":"generalize_names(name, output_sep=' ', firstname_output_letters=1) Generalize a person's first and last name. Returns a person's name in the format <last_name><separator><first name letter(s)> (all lowercase). Parameters name : str Name of the player output_sep : str (default: ' ') String for separating last name and first name in the output. firstname_output_letters : int Number of letters in the abbreviated first name. Returns gen_name : str The generalized name. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names/","title":"generalize_names"},{"location":"api_subpackages/mlxtend.text/#generalize_names_duplcheck","text":"generalize_names_duplcheck(df, col_name) Generalizes names and removes duplicates. Applies mlxtend.text.generalize_names to a DataFrame with 1 first name letter by default and uses more first name letters if duplicates are detected. Parameters df : pandas.DataFrame DataFrame that contains a column where generalize_names should be applied. col_name : str Name of the DataFrame column to which the generalize_names function should be applied. Returns df_new : pandas.DataFrame New DataFrame object where the generalize_names function has been applied without duplicates. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names_duplcheck/","title":"generalize_names_duplcheck"},{"location":"api_subpackages/mlxtend.text/#tokenizer_emoticons","text":"tokenizer_emoticons(text) Return emoticons from text Examples >>> tokenizer_emoticons('This :) is :( a test :-)!') [':)', ':(', ':-)'] For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_emoticons/","title":"tokenizer_emoticons"},{"location":"api_subpackages/mlxtend.text/#tokenizer_words_and_emoticons","text":"tokenizer_words_and_emoticons(text) Convert text to lowercase words and emoticons. Examples >>> tokenizer_words_and_emoticons('This :) is :( a test :-)!') ['this', 'is', 'a', 'test', ':)', ':(', ':-)'] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_words_and_emoticons/","title":"tokenizer_words_and_emoticons"},{"location":"api_subpackages/mlxtend.utils/","text":"mlxtend version: 0.14.0dev Counter Counter(stderr=False, start_newline=True, precision=0, name=None) Class to display the progress of for-loop iterators. Parameters stderr : bool (default: False) Prints output to sys.stderr if True; uses sys.stdout otherwise. start_newline : bool (default: True) Prepends a new line to the counter, which prevents overwriting counters if multiple counters are printed in succession. precision : int (default: 0) Sets the number of decimal places when displaying the time elapsed in seconds.
name : string (default: None) Prepends the specified name before the counter to allow distinguishing between multiple counters. Attributes curr_iter : int The current iteration. start_time : float The system's time in seconds when the Counter was initialized. end_time : float The system's time in seconds when the Counter was last updated. Examples >>> cnt = Counter() >>> for i in range(20): ... # do some computation ... time.sleep(0.1) ... cnt.update() 20 iter | 2 sec >>> print('The counter was initialized.' ' %d seconds ago.' % (time.time() - cnt.start_time)) The counter was initialized. 2 seconds ago. >>> print('The counter was last updated' ' %d seconds ago.' % (time.time() - cnt.end_time)) The counter was last updated 0 seconds ago. For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/utils/Counter/ Methods update() Print current iteration and time elapsed. assert_raises assert_raises(exception_type, message, func, *args, **kwargs) Check that an exception is raised with a specific message Parameters exception_type : exception The exception that should be raised message : str (default: None) The error message that should be raised. Ignored if False or None. func : callable The function that raises the exception *args : positional arguments to func . **kwargs : keyword arguments to func check_Xy check_Xy(X, y, y_int=True) format_kwarg_dictionaries format_kwarg_dictionaries(default_kwargs=None, user_kwargs=None, protected_keys=None) Function to combine default and user-specified kwargs dictionaries Parameters default_kwargs : dict, optional Default kwargs (default is None). user_kwargs : dict, optional User-specified kwargs (default is None). protected_keys : array_like, optional Sequence of keys to be removed from the returned dictionary (default is None). Returns formatted_kwargs : dict Formatted kwargs dictionary.","title":"Mlxtend.utils"},{"location":"api_subpackages/mlxtend.utils/#counter","text":"Counter(stderr=False, start_newline=True, precision=0, name=None) Class to display the progress of for-loop iterators. Parameters stderr : bool (default: False) Prints output to sys.stderr if True; uses sys.stdout otherwise. start_newline : bool (default: True) Prepends a new line to the counter, which prevents overwriting counters if multiple counters are printed in succession. precision : int (default: 0) Sets the number of decimal places when displaying the time elapsed in seconds. name : string (default: None) Prepends the specified name before the counter to allow distinguishing between multiple counters. Attributes curr_iter : int The current iteration. start_time : float The system's time in seconds when the Counter was initialized. end_time : float The system's time in seconds when the Counter was last updated. Examples >>> cnt = Counter() >>> for i in range(20): ... # do some computation ... time.sleep(0.1) ... cnt.update() 20 iter | 2 sec >>> print('The counter was initialized.' ' %d seconds ago.' % (time.time() - cnt.start_time)) The counter was initialized. 2 seconds ago. >>> print('The counter was last updated' ' %d seconds ago.' % (time.time() - cnt.end_time)) The counter was last updated 0 seconds ago.
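A hedged variation of the example above, using the documented name and precision parameters (the exact output format is an assumption):
import time
from mlxtend.utils import Counter
cnt = Counter(name='epoch', precision=2)  # label the counter, show 2 decimals
for i in range(5):
    time.sleep(0.1)  # stand-in for real work
    cnt.update()
# expected to print something like: epoch 5 iter | 0.52 sec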
For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/utils/Counter/","title":"Counter"},{"location":"api_subpackages/mlxtend.utils/#methods","text":"update() Print current iteration and time elapsed.","title":"Methods"},{"location":"api_subpackages/mlxtend.utils/#assert_raises","text":"assert_raises(exception_type, message, func, *args, **kwargs) Check that an exception is raised with a specific message Parameters exception_type : exception The exception that should be raised message : str (default: None) The error message that should be raised. Ignored if False or None. func : callable The function that raises the exception *args : positional arguments to func . **kwargs : keyword arguments to func","title":"assert_raises"},{"location":"api_subpackages/mlxtend.utils/#check_xy","text":"check_Xy(X, y, y_int=True)","title":"check_Xy"},{"location":"api_subpackages/mlxtend.utils/#format_kwarg_dictionaries","text":"format_kwarg_dictionaries(default_kwargs=None, user_kwargs=None, protected_keys=None) Function to combine default and user-specified kwargs dictionaries Parameters default_kwargs : dict, optional Default kwargs (default is None). user_kwargs : dict, optional User-specified kwargs (default is None). protected_keys : array_like, optional Sequence of keys to be removed from the returned dictionary (default is None). Returns formatted_kwargs : dict Formatted kwargs dictionary.","title":"format_kwarg_dictionaries"},{"location":"user_guide/classifier/Adaline/","text":"Adaptive Linear Neuron -- Adaline An implementation of the ADAptive LInear NEuron, Adaline, for binary classification tasks. from mlxtend.classifier import Adaline Overview An illustration of the ADAptive LInear NEuron (Adaline) -- a single-layer artificial linear neuron with a threshold unit: The Adaline classifier is closely related to the Ordinary Least Squares (OLS) Linear Regression algorithm; in OLS regression, we find the line (or hyperplane) that minimizes the vertical offsets. Or in other words, we define the best-fitting line as the line that minimizes the sum of squared errors (SSE) or mean squared error (MSE) between our target variable (y) and our predicted output over all samples i in our dataset of size n . SSE = \sum_i (\text{target}^{(i)} - \text{output}^{(i)})^2 MSE = \frac{1}{n} \times SSE LinearRegression implements a linear regression model for performing ordinary least squares regression, and in Adaline, we add a threshold function g(\cdot) to convert the continuous outcome to a categorical class label: $$y = g({z}) = \begin{cases} 1 & \text{if z $\ge$ 0}\\ -1 & \text{otherwise}. \end{cases} $$ An Adaline model can be trained by one of the following three approaches: Normal Equations Gradient Descent Stochastic Gradient Descent Normal Equations (closed-form solution) The closed-form solution should be preferred for \"smaller\" datasets where calculating (a \"costly\") matrix inverse is not a concern. For very large datasets, or datasets where the inverse of [X^T X] may not exist (the matrix is non-invertible or singular, e.g., in case of perfect multicollinearity), the gradient descent or stochastic gradient descent approaches are to be preferred. The linear function (linear regression model) is defined as: z = w_0x_0 + w_1x_1 + ... + w_mx_m = \sum_{j=0}^{m} w_j x_j = \mathbf{w}^T\mathbf{x} where y is the response variable, \mathbf{x} is an m -dimensional sample vector, and \mathbf{w} is the weight vector (vector of coefficients).
Note that w_0 represents the y-axis intercept of the model and therefore x_0=1 . Using the closed-form solution (normal equation), we compute the weights of the model as follows: \\mathbf{w} = (\\mathbf{X}^T\\mathbf{X})^{-1}\\mathbf{X}^Ty Gradient Descent (GD) and Stochastic Gradient Descent (SGD) In the current implementation, the Adaline model is learned via Gradient Descent or Stochastic Gradient Descent. See Gradient Descent and Stochastic Gradient Descent and Deriving the Gradient Descent Rule for Linear Regression and Adaline for details. Random shuffling is implemented as: for one or more epochs randomly shuffle samples in the training set for training sample i compute gradients and perform weight updates References B. Widrow, M. E. Hoff, et al. Adaptive switching circuits . 1960. Example 1 - Closed Form Solution from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Adaline import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() ada = Adaline(epochs=30, eta=0.01, minibatches=None, random_seed=1) ada.fit(X, y) plot_decision_regions(X, y, clf=ada) plt.title('Adaline - Stochastic Gradient Descent') plt.show() Example 2 - Gradient Descent from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Adaline import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() ada = Adaline(epochs=30, eta=0.01, minibatches=1, # for Gradient Descent Learning random_seed=1, print_progress=3) ada.fit(X, y) plot_decision_regions(X, y, clf=ada) plt.title('Adaline - Stochastic Gradient Descent') plt.show() plt.plot(range(len(ada.cost_)), ada.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') Iteration: 30/30 | Cost 3.79 | Elapsed: 0:00:00 | ETA: 0:00:00 Example 3 - Stochastic Gradient Descent from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Adaline import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() ada = Adaline(epochs=15, eta=0.02, minibatches=len(y), # for SGD learning random_seed=1, print_progress=3) ada.fit(X, y) plot_decision_regions(X, y, clf=ada) plt.title('Adaline - Stochastic Gradient Descent') plt.show() plt.plot(range(len(ada.cost_)), ada.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 15/15 | Cost 3.81 | Elapsed: 0:00:00 | ETA: 0:00:00 Example 4 - Stochastic Gradient Descent with Minibatches from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Adaline import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - 
X[:,1].mean()) / X[:,1].std() ada = Adaline(epochs=15, eta=0.02, minibatches=5, # for SGD learning w. minibatch size 20 random_seed=1, print_progress=3) ada.fit(X, y) plot_decision_regions(X, y, clf=ada) plt.title('Adaline - Stochastic Gradient Descent') plt.show() plt.plot(range(len(ada.cost_)), ada.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 15/15 | Cost 3.87 | Elapsed: 0:00:00 | ETA: 0:00:00 API Adaline(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) ADAptive LInear NEuron classifier. Note that this implementation of Adaline expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Adaline/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Adaptive Linear Neuron -- Adaline"},{"location":"user_guide/classifier/Adaline/#adaptive-linear-neuron-adaline","text":"An implementation of the ADAptive LInear NEuron, Adaline, for binary classification tasks.
from mlxtend.classifier import Adaline","title":"Adaptive Linear Neuron -- Adaline"},{"location":"user_guide/classifier/Adaline/#overview","text":"An illustration of the ADAptive LInear NEuron (Adaline) -- a single-layer artificial linear neuron with a threshold unit: The Adaline classifier is closely related to the Ordinary Least Squares (OLS) Linear Regression algorithm; in OLS regression we find the line (or hyperplane) that minimizes the vertical offsets. Or in other words, we define the best-fitting line as the line that minimizes the sum of squared errors (SSE) or mean squared error (MSE) between our target variable (y) and our predicted output over all samples i in our dataset of size n . SSE = \\sum_i (\\text{target}^{(i)} - \\text{output}^{(i)})^2 MSE = \\frac{1}{n} \\times SSE LinearRegression implements a linear regression model for performing ordinary least squares regression, and in Adaline, we add a threshold function g(\\cdot) to convert the continuous outcome to a categorical class label: $$y = g({z}) = \\begin{cases} 1 & \\text{if z $\\ge$ 0}\\\\ -1 & \\text{otherwise}. \\end{cases} $$ An Adaline model can be trained by one of the following three approaches: Normal Equations Gradient Descent Stochastic Gradient Descent","title":"Overview"},{"location":"user_guide/classifier/Adaline/#normal-equations-closed-form-solution","text":"The closed-form solution should be preferred for \"smaller\" datasets where calculating (a \"costly\") matrix inverse is not a concern. For very large datasets, or datasets where the inverse of [X^T X] may not exist (the matrix is non-invertible or singular, e.g., in case of perfect multicollinearity), the gradient descent or stochastic gradient descent approaches are to be preferred. The linear function (linear regression model) is defined as: z = w_0x_0 + w_1x_1 + ... + w_mx_m = \\sum_{j=0}^{m} w_j x_j = \\mathbf{w}^T\\mathbf{x} where y is the response variable, \\mathbf{x} is an m -dimensional sample vector, and \\mathbf{w} is the weight vector (vector of coefficients). Note that w_0 represents the y-axis intercept of the model and therefore x_0=1 . Using the closed-form solution (normal equation), we compute the weights of the model as follows: \\mathbf{w} = (\\mathbf{X}^T\\mathbf{X})^{-1}\\mathbf{X}^Ty","title":"Normal Equations (closed-form solution)"},{"location":"user_guide/classifier/Adaline/#gradient-descent-gd-and-stochastic-gradient-descent-sgd","text":"In the current implementation, the Adaline model is learned via Gradient Descent or Stochastic Gradient Descent. See Gradient Descent and Stochastic Gradient Descent and Deriving the Gradient Descent Rule for Linear Regression and Adaline for details. Random shuffling is implemented as: for one or more epochs randomly shuffle samples in the training set for training sample i compute gradients and perform weight updates","title":"Gradient Descent (GD) and Stochastic Gradient Descent (SGD)"},{"location":"user_guide/classifier/Adaline/#references","text":"B. Widrow, M. E. Hoff, et al. Adaptive switching circuits . 
1960.","title":"References"},{"location":"user_guide/classifier/Adaline/#example-1-closed-form-solution","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Adaline import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() ada = Adaline(epochs=30, eta=0.01, minibatches=None, random_seed=1) ada.fit(X, y) plot_decision_regions(X, y, clf=ada) plt.title('Adaline - Stochastic Gradient Descent') plt.show()","title":"Example 1 - Closed Form Solution"},{"location":"user_guide/classifier/Adaline/#example-2-gradient-descent","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Adaline import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() ada = Adaline(epochs=30, eta=0.01, minibatches=1, # for Gradient Descent Learning random_seed=1, print_progress=3) ada.fit(X, y) plot_decision_regions(X, y, clf=ada) plt.title('Adaline - Stochastic Gradient Descent') plt.show() plt.plot(range(len(ada.cost_)), ada.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') Iteration: 30/30 | Cost 3.79 | Elapsed: 0:00:00 | ETA: 0:00:00 ","title":"Example 2 - Gradient Descent"},{"location":"user_guide/classifier/Adaline/#example-3-stochastic-gradient-descent","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Adaline import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() ada = Adaline(epochs=15, eta=0.02, minibatches=len(y), # for SGD learning random_seed=1, print_progress=3) ada.fit(X, y) plot_decision_regions(X, y, clf=ada) plt.title('Adaline - Stochastic Gradient Descent') plt.show() plt.plot(range(len(ada.cost_)), ada.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 15/15 | Cost 3.81 | Elapsed: 0:00:00 | ETA: 0:00:00","title":"Example 3 - Stochastic Gradient Descent"},{"location":"user_guide/classifier/Adaline/#example-4-stochastic-gradient-descent-with-minibatches","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Adaline import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() ada = Adaline(epochs=15, eta=0.02, minibatches=5, # for SGD learning w. 
minibatch size 20 random_seed=1, print_progress=3) ada.fit(X, y) plot_decision_regions(X, y, clf=ada) plt.title('Adaline - Stochastic Gradient Descent') plt.show() plt.plot(range(len(ada.cost_)), ada.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 15/15 | Cost 3.87 | Elapsed: 0:00:00 | ETA: 0:00:00","title":"Example 4 - Stochastic Gradient Descent with Minibatches"},{"location":"user_guide/classifier/Adaline/#api","text":"Adaline(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) ADAptive LInear NEuron classifier. Note that this implementation of Adaline expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Adaline/","title":"API"},{"location":"user_guide/classifier/Adaline/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Methods"},{"location":"user_guide/classifier/EnsembleVoteClassifier/","text":"EnsembleVoteClassifier Implementation of a majority voting EnsembleVoteClassifier for classification. from mlxtend.classifier import EnsembleVoteClassifier Overview The EnsembleVoteClassifier is a meta-classifier for combining similar or conceptually different machine learning classifiers for classification via majority or plurality voting. (For simplicity, we will refer to both majority and plurality voting as majority voting.)
The EnsembleVoteClassifier implements \"hard\" and \"soft\" voting. In hard voting, we predict the final class label as the class label that has been predicted most frequently by the classification models. In soft voting, we predict the class labels by averaging the class-probabilities (only recommended if the classifiers are well-calibrated). Note If you are interested in using the EnsembleVoteClassifier , please note that it is now also available through scikit-learn (>0.17) as VotingClassifier . Majority Voting / Hard Voting Hard voting is the simplest case of majority voting. Here, we predict the class label \hat{y} via majority (plurality) voting of each classifier C_j : \hat{y}=mode\{C_1(\mathbf{x}), C_2(\mathbf{x}), ..., C_m(\mathbf{x})\} Assuming that we combine three classifiers that classify a training sample as follows: classifier 1 -> class 0 classifier 2 -> class 0 classifier 3 -> class 1 \hat{y}=mode\{0, 0, 1\} = 0 Via majority vote, we would classify the sample as \"class 0.\" Weighted Majority Vote In addition to the simple majority vote (hard voting) as described in the previous section, we can compute a weighted majority vote by associating a weight w_j with classifier C_j : \hat{y} = \arg \max_i \sum^{m}_{j=1} w_j \chi_A \big(C_j(\mathbf{x})=i\big), where \chi_A is the characteristic function [C_j(\mathbf{x}) = i \; \in A] , and A is the set of unique class labels. Continuing with the example from the previous section classifier 1 -> class 0 classifier 2 -> class 0 classifier 3 -> class 1 assigning the weights {0.2, 0.2, 0.6} would yield a prediction \hat{y} = 1 : \arg \max_i [0.2 \times i_0 + 0.2 \times i_0 + 0.6 \times i_1] = 1 Soft Voting In soft voting, we predict the class labels based on the predicted probabilities p for each classifier -- this approach is only recommended if the classifiers are well-calibrated. \hat{y} = \arg \max_i \sum^{m}_{j=1} w_j p_{ij}, where w_j is the weight that can be assigned to the j th classifier. Assuming the example in the previous section was a binary classification task with class labels i \in \{0, 1\} , our ensemble could make the following prediction: C_1(\mathbf{x}) \rightarrow [0.9, 0.1] C_2(\mathbf{x}) \rightarrow [0.8, 0.2] C_3(\mathbf{x}) \rightarrow [0.4, 0.6] Using uniform weights, we compute the average probabilities: p(i_0 \mid \mathbf{x}) = \frac{0.9 + 0.8 + 0.4}{3} = 0.7 \\\\ p(i_1 \mid \mathbf{x}) = \frac{0.1 + 0.2 + 0.6}{3} = 0.3 \hat{y} = \arg \max_i \big[p(i_0 \mid \mathbf{x}), p(i_1 \mid \mathbf{x}) \big] = 0 However, assigning the weights {0.1, 0.1, 0.8} would yield a prediction \hat{y} = 1 : p(i_0 \mid \mathbf{x}) = {0.1 \times 0.9 + 0.1 \times 0.8 + 0.8 \times 0.4} = 0.49 \\\\ p(i_1 \mid \mathbf{x}) = {0.1 \times 0.1 + 0.1 \times 0.2 + 0.8 \times 0.6} = 0.51 \hat{y} = \arg \max_i \big[p(i_0 \mid \mathbf{x}), p(i_1 \mid \mathbf{x}) \big] = 1 References [1] S. Raschka. Python Machine Learning . Packt Publishing Ltd., 2015.
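The weighted soft-voting arithmetic above is easy to check numerically; a minimal NumPy sketch using the three probability vectors and the weights {0.1, 0.1, 0.8} from the example:
import numpy as np
probas = np.array([[0.9, 0.1],   # C_1(x)
                   [0.8, 0.2],   # C_2(x)
                   [0.4, 0.6]])  # C_3(x)
weights = np.array([0.1, 0.1, 0.8])
# weighted average probability for each class label
avg = np.average(probas, axis=0, weights=weights)
print(avg)           # [0.49 0.51]
print(avg.argmax())  # 1 -> predicted class label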
Example 1 - Classifying Iris Flowers Using Different Classification Models from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target from sklearn import model_selection from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier import numpy as np clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() print('5-fold cross validation:\\n') labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes'] for clf, label in zip([clf1, clf2, clf3], labels): scores = model_selection.cross_val_score(clf, X, y, cv=5, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 5-fold cross validation: Accuracy: 0.90 (+/- 0.05) [Logistic Regression] Accuracy: 0.93 (+/- 0.05) [Random Forest] Accuracy: 0.91 (+/- 0.04) [Naive Bayes] from mlxtend.classifier import EnsembleVoteClassifier eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], weights=[1,1,1]) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'Ensemble'] for clf, label in zip([clf1, clf2, clf3, eclf], labels): scores = model_selection.cross_val_score(clf, X, y, cv=5, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) Accuracy: 0.90 (+/- 0.05) [Logistic Regression] Accuracy: 0.93 (+/- 0.05) [Random Forest] Accuracy: 0.91 (+/- 0.04) [Naive Bayes] Accuracy: 0.95 (+/- 0.05) [Ensemble] Plotting Decision Regions import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions import matplotlib.gridspec as gridspec import itertools gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'Ensemble'] for clf, lab, grd in zip([clf1, clf2, clf3, eclf], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf) plt.title(lab) import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions import matplotlib.gridspec as gridspec import itertools gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'Ensemble'] for clf, lab, grd in zip([clf1, clf2, clf3, eclf], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf) plt.title(lab) Example 2 - Grid Search from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import EnsembleVoteClassifier clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft') params = {'logisticregression__C': [1.0, 100.0], 'randomforestclassifier__n_estimators': [20, 200],} grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5) grid.fit(iris.data, iris.target) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) 0.953 +/- 0.01 
{'logisticregression__C': 1.0, 'randomforestclassifier__n_estimators': 20} 0.960 +/- 0.01 {'logisticregression__C': 1.0, 'randomforestclassifier__n_estimators': 200} 0.960 +/- 0.01 {'logisticregression__C': 100.0, 'randomforestclassifier__n_estimators': 20} 0.953 +/- 0.02 {'logisticregression__C': 100.0, 'randomforestclassifier__n_estimators': 200} Note : If the EnsembleVoteClassifier is initialized with multiple similar estimator objects, the estimator names are modified with consecutive integer indices, for example: clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) eclf = EnsembleVoteClassifier(clfs=[clf1, clf1, clf2], voting='soft') params = {'logisticregression-1__C': [1.0, 100.0], 'logisticregression-2__C': [1.0, 100.0], 'randomforestclassifier__n_estimators': [20, 200],} grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5) grid = grid.fit(iris.data, iris.target) Note The EnsembleVoteClassifier also enables grid search over the clfs argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both different classifiers and classifier parameters at the same time. For instance, while the following parameter dictionary works params = {'randomforestclassifier__n_estimators': [1, 100], 'clfs': [(clf1, clf1, clf1), (clf2, clf3)]} it will use the instance settings of clf1 , clf2 , and clf3 and not overwrite them with the 'n_estimators' settings from 'randomforestclassifier__n_estimators': [1, 100] . Example 3 - Majority voting with classifiers trained on different feature subsets Feature selection algorithms implemented in scikit-learn as well as the SequentialFeatureSelector implement a transform method that passes the reduced feature subset to the next item in a Pipeline . For example, the method def transform(self, X): return X[:, self.k_feature_idx_] returns the best feature columns, k_feature_idx_ , given a dataset X. Thus, we simply need to construct a Pipeline consisting of the feature selector and the classifier in order to select different feature subsets for different algorithms. During fitting , the optimal feature subsets are automatically determined via the GridSearchCV object, and by calling predict , the fitted feature selector in the pipeline only passes along these columns, which yielded the best performance for the respective classifier.
from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, :], iris.target from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import EnsembleVoteClassifier from sklearn.pipeline import Pipeline from mlxtend.feature_selection import SequentialFeatureSelector clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() # Creating a feature-selection-classifier pipeline sfs1 = SequentialFeatureSelector(clf1, k_features=4, forward=True, floating=False, scoring='accuracy', verbose=0, cv=0) clf1_pipe = Pipeline([('sfs', sfs1), ('logreg', clf1)]) eclf = EnsembleVoteClassifier(clfs=[clf1_pipe, clf2, clf3], voting='soft') params = {'pipeline__sfs__k_features': [1, 2, 3], 'pipeline__logreg__C': [1.0, 100.0], 'randomforestclassifier__n_estimators': [20, 200]} grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5) grid.fit(iris.data, iris.target) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) 0.953 +/- 0.01 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 1, 'randomforestclassifier__n_estimators': 20} 0.947 +/- 0.02 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 1, 'randomforestclassifier__n_estimators': 200} 0.953 +/- 0.01 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 2, 'randomforestclassifier__n_estimators': 20} 0.947 +/- 0.02 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 2, 'randomforestclassifier__n_estimators': 200} 0.953 +/- 0.01 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 20} 0.953 +/- 0.02 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 200} 0.947 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 1, 'randomforestclassifier__n_estimators': 20} 0.953 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 1, 'randomforestclassifier__n_estimators': 200} 0.947 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 2, 'randomforestclassifier__n_estimators': 20} 0.947 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 2, 'randomforestclassifier__n_estimators': 200} 0.960 +/- 0.01 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 20} 0.953 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 200} The best parameters determined via GridSearch are: grid.best_params_ {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 20} Now, we assign these parameters to the ensemble voting classifier, fit the models on the complete training set, and perform a prediction on 3 samples from the Iris dataset. eclf = eclf.set_params(**grid.best_params_) eclf.fit(X, y).predict(X[[1, 51, 149]]) array([0, 1, 2]) Manual Approach Alternatively, we can select different columns \"manually\" using the ColumnSelector object. In this example, we select only the first (sepal length) and third (petal length) column for the logistic regression classifier ( clf1 ). 
from mlxtend.feature_selection import ColumnSelector col_sel = ColumnSelector(cols=[0, 2]) clf1_pipe = Pipeline([('sel', col_sel), ('logreg', clf1)]) eclf = EnsembleVoteClassifier(clfs=[clf1_pipe, clf2, clf3], voting='soft') eclf.fit(X, y).predict(X[[1, 51, 149]]) array([0, 1, 2]) Furthermore, we can fit the SequentialFeatureSelector separately, outside the grid search hyperparameter optimization pipeline. Here, we determine the best features first, and then we construct a pipeline using these \"fixed,\" best features as seed for the ColumnSelector : sfs1 = SequentialFeatureSelector(clf1, k_features=2, forward=True, floating=False, scoring='accuracy', verbose=1, cv=0) sfs1.fit(X, y) print('Best features', sfs1.k_feature_idx_) col_sel = ColumnSelector(cols=sfs1.k_feature_idx_) clf1_pipe = Pipeline([('sel', col_sel), ('logreg', clf1)]) [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished Features: 1/2[Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished Features: 2/2 Best features (0, 2) eclf = EnsembleVoteClassifier(clfs=[clf1_pipe, clf2, clf3], voting='soft') eclf.fit(X, y).predict(X[[1, 51, 149]]) array([0, 1, 2]) Example 5 - Using Pre-fitted Classifiers from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target Assume that we previously fitted our classifiers: from sklearn import model_selection from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier import numpy as np clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() for clf in (clf1, clf2, clf3): clf.fit(X, y) By setting refit=False , the EnsembleVoteClassifier will not re-fit these classifiers to save computational time: from mlxtend.classifier import EnsembleVoteClassifier import copy eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], weights=[1,1,1], refit=False) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'Ensemble'] eclf.fit(X, y) print('accuracy:', np.mean(y == eclf.predict(X))) accuracy: 0.973333333333 However, please note that refit=False is incompatible with any form of cross-validation that is done in e.g., model_selection.cross_val_score or model_selection.GridSearchCV , etc., since it would require the classifiers to be refit to the training folds. Thus, only use refit=False if you want to make a prediction directly without cross-validation. Example 6 - Ensembles of Classifiers that Operate on Different Feature Subsets If desired, the different classifiers can be fit to different subsets of features in the training dataset.
The following example illustrates how this can be done on a technical level using scikit-learn pipelines and the ColumnSelector : from sklearn.datasets import load_iris from mlxtend.classifier import EnsembleVoteClassifier from mlxtend.feature_selection import ColumnSelector from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression iris = load_iris() X = iris.data y = iris.target pipe1 = make_pipeline(ColumnSelector(cols=(0, 2)), LogisticRegression()) pipe2 = make_pipeline(ColumnSelector(cols=(1, 2, 3)), LogisticRegression()) eclf = EnsembleVoteClassifier(clfs=[pipe1, pipe2]) eclf.fit(X, y) EnsembleVoteClassifier(clfs=[Pipeline(memory=None, steps=[('columnselector', ColumnSelector(cols=(0, 2), drop_axis=False)), ('logisticregression', LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l...='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False))])], refit=True, verbose=0, voting='hard', weights=None) Example 7 - A Note about Scikit-Learn SVMs and Soft Voting This section provides some additional technical insights in how probabilities are used when voting='soft' . Note that scikit-learn estimates the probabilities for SVMs (more info here: http://scikit-learn.org/stable/modules/svm.html#scores-probabilities) in a way that these may not be consistent with the class labels that the SVM predicts. This is an extreme example, but let's say we have a dataset with 3 class labels, 0, 1, and 2. For a given training example, the SVM classifier may predict class 2. However, the class-membership probabilities may look as follows: class 0: 99% class 1: 0.5% class 2: 0.5% A practical example of this scenario is shown below: import numpy as np from mlxtend.classifier import EnsembleVoteClassifier from sklearn.svm import SVC from sklearn.datasets import load_iris iris = load_iris() X, y = iris.data, iris.target clf2 = SVC(probability=True, random_state=4) clf2.fit(X, y) eclf = EnsembleVoteClassifier(clfs=[clf2], voting='soft', refit=False) eclf.fit(X, y) for svm_class, e_class, svm_prob, e_prob, in zip(clf2.predict(X), eclf.predict(X), clf2.predict_proba(X), eclf.predict_proba(X)): if svm_class != e_class: print('============') print('Probas from SVM :', svm_prob) print('Class from SVM :', svm_class) print('Probas from SVM in Ensemble:', e_prob) print('Class from SVM in Ensemble :', e_class) print('============') ============ Probas from SVM : [ 0.01192489 0.47662663 0.51144848] Class from SVM : 1 Probas from SVM in Ensemble: [ 0.01192489 0.47662663 0.51144848] Class from SVM in Ensemble : 2 ============ Based on the probabilities, we would expect the SVM to predict class 2, because it has the highest probability. Since the EnsembleVoteClassifier uses the argmax function internally if voting='soft' , it would indeed predict class 2 in this case even if the ensemble consists of only one SVM model. Note that in practice, this minor technical detail does not need to concern you, but it is useful to keep it in mind in case you are wondering about results from a 1-model SVM ensemble compared to that SVM alone -- this is not a bug. API EnsembleVoteClassifier(clfs, voting='hard', weights=None, verbose=0, refit=True) Soft Voting/Majority Rule classifier for scikit-learn estimators. Parameters clfs : array-like, shape = [n_classifiers] A list of classifiers. 
Invoking the fit method on the EnsembleVoteClassifier will fit clones of those original classifiers that will be stored in the class attribute self.clfs_ if refit=True (default). voting : str, {'hard', 'soft'} (default='hard') If 'hard', uses predicted class labels for majority rule voting. Else if 'soft', predicts the class label based on the argmax of the sums of the predicted probabilities, which is recommended for an ensemble of well-calibrated classifiers. weights : array-like, shape = [n_classifiers], optional (default= None ) Sequence of weights ( float or int ) to weight the occurrences of predicted class labels ( hard voting) or class probabilities before averaging ( soft voting). Uses uniform weights if None . verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the clf being fitted - verbose=2 : Prints info about the parameters of the clf being fitted - verbose>2 : Changes verbose param of the underlying clf to self.verbose - 2 refit : bool (default: True) Refits classifiers in clfs if True; otherwise uses references to the clfs (assumes that the classifiers were already fit). Note: refit=False is incompatible with most scikit-learn wrappers! For instance, if any form of cross-validation is performed, this would require re-fitting the classifiers to the training folds, which would raise a NotFittedError if refit=False. (New in mlxtend v0.6.) Attributes classes_ : array-like, shape = [n_predictions] clf : array-like, shape = [n_predictions] The unmodified input classifiers clf_ : array-like, shape = [n_predictions] Fitted clones of the input classifiers Examples >>> import numpy as np >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.ensemble import RandomForestClassifier >>> from mlxtend.classifier import EnsembleVoteClassifier >>> clf1 = LogisticRegression(random_state=1) >>> clf2 = RandomForestClassifier(random_state=1) >>> clf3 = GaussianNB() >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> eclf1 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='hard', verbose=1) >>> eclf1 = eclf1.fit(X, y) >>> print(eclf1.predict(X)) [1 1 1 2 2 2] >>> eclf2 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft') >>> eclf2 = eclf2.fit(X, y) >>> print(eclf2.predict(X)) [1 1 1 2 2 2] >>> eclf3 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='soft', weights=[2,1,1]) >>> eclf3 = eclf3.fit(X, y) >>> print(eclf3.predict(X)) [1 1 1 2 2 2] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/EnsembleVoteClassifier/ Methods fit(X, y, sample_weight=None) Learn weight coefficients from training data for each classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each classifier in the clfs list. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits the transformer to X and y with optional parameters fit_params and returns a transformed version of X.
Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict class labels for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns maj : array-like, shape = [n_samples] Predicted class labels. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns avg : array-like, shape = [n_samples, n_classes] Weighted average probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Return class labels or probabilities for X for each estimator. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns If voting='soft' : array-like = [n_classifiers, n_samples, n_classes] Class probabilities calculated by each classifier. If voting='hard' : array-like = [n_classifiers, n_samples] Class labels predicted by each classifier.","title":"EnsembleVoteClassifier"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#ensemblevoteclassifier","text":"Implementation of a majority voting EnsembleVoteClassifier for classification. from mlxtend.classifier import EnsembleVoteClassifier","title":"EnsembleVoteClassifier"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#overview","text":"The EnsembleVoteClassifier is a meta-classifier for combining similar or conceptually different machine learning classifiers for classification via majority or plurality voting. (For simplicity, we will refer to both majority and plurality voting as majority voting.) The EnsembleVoteClassifier implements \"hard\" and \"soft\" voting. In hard voting, we predict the final class label as the class label that has been predicted most frequently by the classification models. In soft voting, we predict the class labels by averaging the class-probabilities (only recommended if the classifiers are well-calibrated). Note If you are interested in using the EnsembleVoteClassifier , please note that it is now also available through scikit-learn (>0.17) as VotingClassifier .","title":"Overview"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#majority-voting-hard-voting","text":"Hard voting is the simplest case of majority voting.
Here, we predict the class label \\hat{y} via majority (plurality) voting of each classifier C_j : \\hat{y}=mode\\{C_1(\\mathbf{x}), C_2(\\mathbf{x}), ..., C_m(\\mathbf{x})\\} Assuming that we combine three classifiers that classify a training sample as follows: classifier 1 -> class 0 classifier 2 -> class 0 classifier 3 -> class 1 \\hat{y}=mode\\{0, 0, 1\\} = 0 Via majority vote, we would classify the sample as \"class 0.\"","title":"Majority Voting / Hard Voting"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#weighted-majority-vote","text":"In addition to the simple majority vote (hard voting) as described in the previous section, we can compute a weighted majority vote by associating a weight w_j with classifier C_j : \\hat{y} = \\arg \\max_i \\sum^{m}_{j=1} w_j \\chi_A \\big(C_j(\\mathbf{x})=i\\big), where \\chi_A is the characteristic function [C_j(\\mathbf{x}) = i \\; \\in A] , and A is the set of unique class labels. Continuing with the example from the previous section classifier 1 -> class 0 classifier 2 -> class 0 classifier 3 -> class 1 assigning the weights {0.2, 0.2, 0.6} would yield a prediction \\hat{y} = 1 : \\arg \\max_i [0.2 \\times i_0 + 0.2 \\times i_0 + 0.6 \\times i_1] = 1","title":"Weighted Majority Vote"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#soft-voting","text":"In soft voting, we predict the class labels based on the predicted probabilities p_{ij} of each classifier -- this approach is only recommended if the classifiers are well-calibrated. \\hat{y} = \\arg \\max_i \\sum^{m}_{j=1} w_j p_{ij}, where w_j is the weight that can be assigned to the j th classifier. Assuming the example in the previous section was a binary classification task with class labels i \\in \\{0, 1\\} , our ensemble could make the following prediction: C_1(\\mathbf{x}) \\rightarrow [0.9, 0.1] C_2(\\mathbf{x}) \\rightarrow [0.8, 0.2] C_3(\\mathbf{x}) \\rightarrow [0.4, 0.6] Using uniform weights, we compute the average probabilities: p(i_0 \\mid \\mathbf{x}) = \\frac{0.9 + 0.8 + 0.4}{3} = 0.7 \\\\\\\\ p(i_1 \\mid \\mathbf{x}) = \\frac{0.1 + 0.2 + 0.6}{3} = 0.3 \\hat{y} = \\arg \\max_i \\big[p(i_0 \\mid \\mathbf{x}), p(i_1 \\mid \\mathbf{x}) \\big] = 0 However, assigning the weights {0.1, 0.1, 0.8} would yield a prediction \\hat{y} = 1 : p(i_0 \\mid \\mathbf{x}) = {0.1 \\times 0.9 + 0.1 \\times 0.8 + 0.8 \\times 0.4} = 0.49 \\\\\\\\ p(i_1 \\mid \\mathbf{x}) = {0.1 \\times 0.1 + 0.1 \\times 0.2 + 0.8 \\times 0.6} = 0.51 \\hat{y} = \\arg \\max_i \\big[p(i_0 \\mid \\mathbf{x}), p(i_1 \\mid \\mathbf{x}) \\big] = 1","title":"Soft Voting"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#references","text":"[1] S. Raschka. Python Machine Learning . Packt Publishing Ltd., 2015.","title":"References"},
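As a quick sanity check of the weighted-vote arithmetic in the two sections above, both voting schemes can be reproduced in a few lines of NumPy. This is a standalone sketch of the math, not the EnsembleVoteClassifier internals; the variable names (votes, probas) are ours:

```python
import numpy as np

# Hard voting with weights {0.2, 0.2, 0.6}:
# np.bincount sums the weight assigned to each predicted label,
# and argmax picks the label with the largest weighted count.
votes = np.array([0, 0, 1])            # predictions of classifiers 1-3
weights = np.array([0.2, 0.2, 0.6])
print(np.argmax(np.bincount(votes, weights=weights)))  # -> 1

# Soft voting with weights {0.1, 0.1, 0.8}:
# weighted average of the class probabilities, then argmax.
probas = np.array([[0.9, 0.1],         # C_1(x)
                   [0.8, 0.2],         # C_2(x)
                   [0.4, 0.6]])        # C_3(x)
avg = np.average(probas, axis=0, weights=[0.1, 0.1, 0.8])
print(avg)             # -> [0.49  0.51]
print(np.argmax(avg))  # -> 1
```

Both results match the hand-computed predictions above.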
{"location":"user_guide/classifier/EnsembleVoteClassifier/#example-1-classifying-iris-flowers-using-different-classification-models","text":"from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target from sklearn import model_selection from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier import numpy as np clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() print('5-fold cross validation:\\n') labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes'] for clf, label in zip([clf1, clf2, clf3], labels): scores = model_selection.cross_val_score(clf, X, y, cv=5, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 5-fold cross validation: Accuracy: 0.90 (+/- 0.05) [Logistic Regression] Accuracy: 0.93 (+/- 0.05) [Random Forest] Accuracy: 0.91 (+/- 0.04) [Naive Bayes] from mlxtend.classifier import EnsembleVoteClassifier eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], weights=[1,1,1]) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'Ensemble'] for clf, label in zip([clf1, clf2, clf3, eclf], labels): scores = model_selection.cross_val_score(clf, X, y, cv=5, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) Accuracy: 0.90 (+/- 0.05) [Logistic Regression] Accuracy: 0.93 (+/- 0.05) [Random Forest] Accuracy: 0.91 (+/- 0.04) [Naive Bayes] Accuracy: 0.95 (+/- 0.05) [Ensemble]","title":"Example 1 - Classifying Iris Flowers Using Different Classification Models"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#plotting-decision-regions","text":"import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions import matplotlib.gridspec as gridspec import itertools gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'Ensemble'] for clf, lab, grd in zip([clf1, clf2, clf3, eclf], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf) plt.title(lab)","title":"Plotting Decision Regions"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#example-2-grid-search","text":"from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import EnsembleVoteClassifier clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft') params = {'logisticregression__C': [1.0, 100.0], 'randomforestclassifier__n_estimators':
[20, 200],} grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5) grid.fit(iris.data, iris.target) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) 0.953 +/- 0.01 {'logisticregression__C': 1.0, 'randomforestclassifier__n_estimators': 20} 0.960 +/- 0.01 {'logisticregression__C': 1.0, 'randomforestclassifier__n_estimators': 200} 0.960 +/- 0.01 {'logisticregression__C': 100.0, 'randomforestclassifier__n_estimators': 20} 0.953 +/- 0.02 {'logisticregression__C': 100.0, 'randomforestclassifier__n_estimators': 200} Note : If the EnsembleVoteClassifier is initialized with multiple similar estimator objects, the estimator names are modified with consecutive integer indices, for example: clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) eclf = EnsembleVoteClassifier(clfs=[clf1, clf1, clf2], voting='soft') params = {'logisticregression-1__C': [1.0, 100.0], 'logisticregression-2__C': [1.0, 100.0], 'randomforestclassifier__n_estimators': [20, 200],} grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5) grid = grid.fit(iris.data, iris.target) Note The EnsembleVoteClassifier also enables grid search over the clfs argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both different classifiers and classifier parameters at the same time. For instance, while the following parameter dictionary works params = {'randomforestclassifier__n_estimators': [1, 100], 'clfs': [(clf1, clf1, clf1), (clf2, clf3)]} it will use the instance settings of clf1 , clf2 , and clf3 and will not overwrite them with the 'n_estimators' settings from 'randomforestclassifier__n_estimators': [1, 100] .","title":"Example 2 - Grid Search"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#example-3-majority-voting-with-classifiers-trained-on-different-feature-subsets","text":"Feature selection algorithms implemented in scikit-learn as well as the SequentialFeatureSelector implement a transform method that passes the reduced feature subset to the next item in a Pipeline . For example, the method def transform(self, X): return X[:, self.k_feature_idx_] returns the best feature columns, k_feature_idx_ , given a dataset X. Thus, we simply need to construct a Pipeline consisting of the feature selector and the classifier in order to select different feature subsets for different algorithms. During fitting , the optimal feature subsets are automatically determined via the GridSearchCV object, and by calling predict , the fitted feature selector in the pipeline only passes along those columns that yielded the best performance for the respective classifier.
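Before the full grid search example below, the following minimal sketch illustrates that transform contract in isolation; it assumes the scikit-learn Iris data and is not part of the original example:

```python
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from mlxtend.feature_selection import SequentialFeatureSelector

iris = datasets.load_iris()
X, y = iris.data, iris.target

# Forward selection of the 2 best features
sfs = SequentialFeatureSelector(LogisticRegression(),
                                k_features=2,
                                forward=True,
                                floating=False,
                                scoring='accuracy',
                                cv=5)
sfs = sfs.fit(X, y)

# transform only passes the selected columns along
print('Selected columns  :', sfs.k_feature_idx_)
print('Shape before/after:', X.shape, '->', sfs.transform(X).shape)
# e.g., (150, 4) -> (150, 2)
```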
from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, :], iris.target from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import EnsembleVoteClassifier from sklearn.pipeline import Pipeline from mlxtend.feature_selection import SequentialFeatureSelector clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() # Creating a feature-selection-classifier pipeline sfs1 = SequentialFeatureSelector(clf1, k_features=4, forward=True, floating=False, scoring='accuracy', verbose=0, cv=0) clf1_pipe = Pipeline([('sfs', sfs1), ('logreg', clf1)]) eclf = EnsembleVoteClassifier(clfs=[clf1_pipe, clf2, clf3], voting='soft') params = {'pipeline__sfs__k_features': [1, 2, 3], 'pipeline__logreg__C': [1.0, 100.0], 'randomforestclassifier__n_estimators': [20, 200]} grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5) grid.fit(iris.data, iris.target) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) 0.953 +/- 0.01 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 1, 'randomforestclassifier__n_estimators': 20} 0.947 +/- 0.02 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 1, 'randomforestclassifier__n_estimators': 200} 0.953 +/- 0.01 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 2, 'randomforestclassifier__n_estimators': 20} 0.947 +/- 0.02 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 2, 'randomforestclassifier__n_estimators': 200} 0.953 +/- 0.01 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 20} 0.953 +/- 0.02 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 200} 0.947 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 1, 'randomforestclassifier__n_estimators': 20} 0.953 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 1, 'randomforestclassifier__n_estimators': 200} 0.947 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 2, 'randomforestclassifier__n_estimators': 20} 0.947 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 2, 'randomforestclassifier__n_estimators': 200} 0.960 +/- 0.01 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 20} 0.953 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 200} The best parameters determined via GridSearch are: grid.best_params_ {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 20} Now, we assign these parameters to the ensemble voting classifier, fit the models on the complete training set, and perform a prediction on 3 samples from the Iris dataset. eclf = eclf.set_params(**grid.best_params_) eclf.fit(X, y).predict(X[[1, 51, 149]]) array([0, 1, 2])","title":"Example 3 - Majority voting with classifiers trained on different feature subsets"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#manual-approach","text":"Alternatively, we can select different columns \"manually\" using the ColumnSelector object. 
In this example, we select only the first (sepal length) and third (petal length) column for the logistic regression classifier ( clf1 ). from mlxtend.feature_selection import ColumnSelector col_sel = ColumnSelector(cols=[0, 2]) clf1_pipe = Pipeline([('sel', col_sel), ('logreg', clf1)]) eclf = EnsembleVoteClassifier(clfs=[clf1_pipe, clf2, clf3], voting='soft') eclf.fit(X, y).predict(X[[1, 51, 149]]) array([0, 1, 2]) Furthermore, we can fit the SequentialFeatureSelector separately, outside the grid search hyperparameter optimization pipeline. Here, we determine the best features first, and then we construct a pipeline using these \"fixed,\" best features as input to the ColumnSelector : sfs1 = SequentialFeatureSelector(clf1, k_features=2, forward=True, floating=False, scoring='accuracy', verbose=1, cv=0) sfs1.fit(X, y) print('Best features', sfs1.k_feature_idx_) col_sel = ColumnSelector(cols=sfs1.k_feature_idx_) clf1_pipe = Pipeline([('sel', col_sel), ('logreg', clf1)]) [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished Features: 1/2[Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished Features: 2/2 Best features (0, 2) eclf = EnsembleVoteClassifier(clfs=[clf1_pipe, clf2, clf3], voting='soft') eclf.fit(X, y).predict(X[[1, 51, 149]]) array([0, 1, 2])","title":"Manual Approach"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#example-5-using-pre-fitted-classifiers","text":"from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target Assume that we previously fitted our classifiers: from sklearn import model_selection from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier import numpy as np clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() for clf in (clf1, clf2, clf3): clf.fit(X, y) By setting refit=False , the EnsembleVoteClassifier will not re-fit these classifiers to save computational time: from mlxtend.classifier import EnsembleVoteClassifier import copy eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], weights=[1,1,1], refit=False) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'Ensemble'] eclf.fit(X, y) print('accuracy:', np.mean(y == eclf.predict(X))) accuracy: 0.973333333333 However, please note that refit=False is incompatible with any form of cross-validation that is performed in, e.g., model_selection.cross_val_score or model_selection.GridSearchCV , since it would require the classifiers to be refit to the training folds. Thus, only use refit=False if you want to make a prediction directly without cross-validation.","title":"Example 5 - Using Pre-fitted Classifiers"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#example-6-ensembles-of-classifiers-that-operate-on-different-feature-subsets","text":"If desired, the different classifiers can be fit to different subsets of features in the training dataset.
The following example illustrates how this can be done on a technical level using scikit-learn pipelines and the ColumnSelector : from sklearn.datasets import load_iris from mlxtend.classifier import EnsembleVoteClassifier from mlxtend.feature_selection import ColumnSelector from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression iris = load_iris() X = iris.data y = iris.target pipe1 = make_pipeline(ColumnSelector(cols=(0, 2)), LogisticRegression()) pipe2 = make_pipeline(ColumnSelector(cols=(1, 2, 3)), LogisticRegression()) eclf = EnsembleVoteClassifier(clfs=[pipe1, pipe2]) eclf.fit(X, y) EnsembleVoteClassifier(clfs=[Pipeline(memory=None, steps=[('columnselector', ColumnSelector(cols=(0, 2), drop_axis=False)), ('logisticregression', LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l...='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False))])], refit=True, verbose=0, voting='hard', weights=None)","title":"Example 6 - Ensembles of Classifiers that Operate on Different Feature Subsets"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#example-7-a-note-about-scikit-learn-svms-and-soft-voting","text":"This section provides some additional technical insights into how probabilities are used when voting='soft' . Note that scikit-learn estimates the probabilities for SVMs (more info here: http://scikit-learn.org/stable/modules/svm.html#scores-probabilities) in a way that these may not be consistent with the class labels that the SVM predicts. This is an extreme example, but let's say we have a dataset with 3 class labels, 0, 1, and 2. For a given training example, the SVM classifier may predict class 2. However, the class-membership probabilities may look as follows: class 0: 99% class 1: 0.5% class 2: 0.5% A practical example of this scenario is shown below: import numpy as np from mlxtend.classifier import EnsembleVoteClassifier from sklearn.svm import SVC from sklearn.datasets import load_iris iris = load_iris() X, y = iris.data, iris.target clf2 = SVC(probability=True, random_state=4) clf2.fit(X, y) eclf = EnsembleVoteClassifier(clfs=[clf2], voting='soft', refit=False) eclf.fit(X, y) for svm_class, e_class, svm_prob, e_prob in zip(clf2.predict(X), eclf.predict(X), clf2.predict_proba(X), eclf.predict_proba(X)): if svm_class != e_class: print('============') print('Probas from SVM :', svm_prob) print('Class from SVM :', svm_class) print('Probas from SVM in Ensemble:', e_prob) print('Class from SVM in Ensemble :', e_class) print('============') ============ Probas from SVM : [ 0.01192489 0.47662663 0.51144848] Class from SVM : 1 Probas from SVM in Ensemble: [ 0.01192489 0.47662663 0.51144848] Class from SVM in Ensemble : 2 ============ Based on the probabilities, we would expect the SVM to predict class 2, because it has the highest probability. Since the EnsembleVoteClassifier uses the argmax function internally if voting='soft' , it would indeed predict class 2 in this case even if the ensemble consists of only one SVM model.
Note that in practice, this minor technical detail does not need to concern you, but it is useful to keep it in mind in case you are wondering about results from a 1-model SVM ensemble compared to that SVM alone -- this is not a bug.","title":"Example 7 - A Note about Scikit-Learn SVMs and Soft Voting"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#api","text":"EnsembleVoteClassifier(clfs, voting='hard', weights=None, verbose=0, refit=True) Soft Voting/Majority Rule classifier for scikit-learn estimators. Parameters clfs : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the VotingClassifier will fit clones of those original classifiers that will be stored in the class attribute self.clfs_ if refit=True (default). voting : str, {'hard', 'soft'} (default='hard') If 'hard', uses predicted class labels for majority rule voting. Else if 'soft', predicts the class label based on the argmax of the sums of the predicted probabilities, which is recommended for an ensemble of well-calibrated classifiers. weights : array-like, shape = [n_classifiers], optional (default= None ) Sequence of weights ( float or int ) to weight the occurrences of predicted class labels ( hard voting) or class probabilities before averaging ( soft voting). Uses uniform weights if None . verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the clf being fitted - verbose=2 : Prints info about the parameters of the clf being fitted - verbose>2 : Changes verbose param of the underlying clf to self.verbose - 2 refit : bool (default: True) Refits classifiers in clfs if True; uses references to the clfs otherwise (assumes that the classifiers were already fit). Note: refit=False is incompatible with most scikit-learn wrappers! For instance, if any form of cross-validation is performed, this would require re-fitting the classifiers to the training folds, which would raise a NotFittedError if refit=False. (New in mlxtend v0.6.) Attributes classes_ : array-like, shape = [n_predictions] clfs : array-like, shape = [n_predictions] The unmodified input classifiers clfs_ : array-like, shape = [n_predictions] Fitted clones of the input classifiers Examples >>> import numpy as np >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.ensemble import RandomForestClassifier >>> from mlxtend.classifier import EnsembleVoteClassifier >>> clf1 = LogisticRegression(random_state=1) >>> clf2 = RandomForestClassifier(random_state=1) >>> clf3 = GaussianNB() >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> eclf1 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='hard', verbose=1) >>> eclf1 = eclf1.fit(X, y) >>> print(eclf1.predict(X)) [1 1 1 2 2 2] >>> eclf2 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft') >>> eclf2 = eclf2.fit(X, y) >>> print(eclf2.predict(X)) [1 1 1 2 2 2] >>> eclf3 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='soft', weights=[2,1,1]) >>> eclf3 = eclf3.fit(X, y) >>> print(eclf3.predict(X)) [1 1 1 2 2 2] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/EnsembleVoteClassifier/","title":"API"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#methods","text":"fit(X, y, sample_weight=None) Learn weight coefficients from training data for each classifier.
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weight to each classifier in the clfs list. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits the transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict class labels for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns maj : array-like, shape = [n_samples] Predicted class labels. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns avg : array-like, shape = [n_samples, n_classes] Weighted average probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since it requires that each label set be correctly predicted for each sample. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) w.r.t. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Return class labels or probabilities for X for each estimator. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns If voting='soft' : array-like = [n_classifiers, n_samples, n_classes] Class probabilities calculated by each classifier. If voting='hard' : array-like = [n_classifiers, n_samples] Class labels predicted by each classifier.","title":"EnsembleVoteClassifier"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#ensemblevoteclassifier","text":"Implementation of a majority voting EnsembleVoteClassifier for classification. from mlxtend.classifier import EnsembleVoteClassifier","title":"EnsembleVoteClassifier"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#overview","text":"The EnsembleVoteClassifier is a meta-classifier for combining similar or conceptually different machine learning classifiers for classification via majority or plurality voting. (For simplicity, we will refer to both majority and plurality voting as majority voting.) The EnsembleVoteClassifier implements \"hard\" and \"soft\" voting. In hard voting, we predict the final class label as the class label that has been predicted most frequently by the classification models. In soft voting, we predict the class labels by averaging the class probabilities (only recommended if the classifiers are well-calibrated). Note If you are interested in using the EnsembleVoteClassifier , please note that it is now also available through scikit-learn (>=0.17) as VotingClassifier .","title":"Overview"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#majority-voting-hard-voting","text":"Hard voting is the simplest case of majority voting.
+ w_mx_m = \\sum_{j=0}^{m} w_j x_j= \\mathbf{w}^T\\mathbf{x}. The net input is in turn based on the logit function logit(p(y=1 \\mid \\mathbf{x})) = z. Here, p(y=1 \\mid \\mathbf{x}) is the conditional probability that a particular sample belongs to class 1 given its features \\mathbf{x} . The logit function takes inputs in the range [0, 1] and transforms them to values over the entire real number range. In contrast, the logistic function takes input values over the entire real number range and transforms them to values in the range [0, 1]. In other words, the logistic function is the inverse of the logit function, and it lets us predict the conditional probability that a certain sample belongs to class 1 (or class 0). After model fitting, the conditional probability p(y=1 \\mid \\mathbf{x}) is converted to a binary class label via a threshold function g(\\cdot) : $$y = g({z}) = \\begin{cases} 1 & \\text{if $\\phi(z) \\ge 0.5$}\\\\ 0 & \\text{otherwise.} \\end{cases} $$ or equivalently: $$y = g({z}) = \\begin{cases} 1 & \\text{if z $\\ge$ 0}\\\\ 0 & \\text{otherwise}. \\end{cases} $$ Objective Function -- Log-Likelihood In order to parameterize a logistic regression model, we maximize the likelihood L(\\cdot) (or minimize the logistic cost function). We write the likelihood as L(\\mathbf{w}) = P(\\mathbf{y} \\mid \\mathbf{x};\\mathbf{w}) = \\prod_{i=1}^{n} P\\big(y^{(i)} \\mid x^{(i)}; \\mathbf{w}\\big) = \\prod^{n}_{i=1}\\bigg(\\phi\\big(z^{(i)}\\big)\\bigg)^{y^{(i)}} \\bigg(1-\\phi\\big(z^{(i)}\\big)\\bigg)^{1-y^{(i)}}, under the assumption that the training samples are independent of each other. In practice, it is easier to maximize the (natural) log of this equation, which is called the log-likelihood function: l(\\mathbf{w}) = \\log L(\\mathbf{w}) = \\sum^{n}_{i=1} y^{(i)} \\log \\bigg(\\phi\\big(z^{(i)}\\big)\\bigg) + \\big( 1 - y^{(i)}\\big) \\log \\big(1-\\phi\\big(z^{(i)}\\big)\\big) One advantage of taking the log is to avoid numeric underflow (and challenges with floating point math) for very small likelihoods. Another advantage is that we can obtain the derivative more easily, using the addition trick to rewrite the product of factors as a summation term, which we can then maximize using optimization algorithms such as gradient ascent. Objective Function -- Logistic Cost Function As an alternative to maximizing the log-likelihood, we can define a cost function J(\\cdot) to be minimized; we rewrite the log-likelihood as: J(\\mathbf{w}) = \\sum_{i=1}^{m} - y^{(i)} \\log \\bigg( \\phi\\big(z^{(i)}\\big) \\bigg) - \\big(1 - y^{(i)}\\big) \\log\\bigg(1-\\phi\\big(z^{(i)}\\big)\\bigg) $$ J\\big(\\phi(z), y; \\mathbf{w}\\big) = \\begin{cases} -\\log\\big(\\phi(z) \\big) & \\text{if $y = 1$}\\\\ -\\log\\big(1- \\phi(z) \\big) & \\text{if $y = 0$} \\end{cases} $$ As we can see in the figure above, we penalize wrong predictions with an increasingly larger cost. Gradient Descent (GD) and Stochastic Gradient Descent (SGD) Optimization Gradient Ascent and the log-likelihood To learn the weight coefficients of a logistic regression model via gradient-based optimization, we compute the partial derivative of the log-likelihood function -- w.r.t.
the j th weight -- as follows: \\frac{\\partial}{\\partial w_j} l(\\mathbf{w}) = \\bigg(y \\frac{1}{\\phi(z)} - (1-y) \\frac{1}{1-\\phi{(z)}} \\bigg) \\frac{\\partial}{\\partial w_j}\\phi(z) As an intermediate step, we compute the partial derivative of the sigmoid function, which will come in handy later: \\begin{align} &\\frac{\\partial}{\\partial z} \\phi(z) = \\frac{\\partial}{{\\partial z}} \\frac{1}{1+e^{-z}} \\\\\\\\ &= \\frac{1}{(1 + e^{-z})^{2}} e^{-z}\\\\\\\\ &= \\frac{1}{1+e^{-z}} \\bigg(1 - \\frac{1}{1+e^{-z}} \\bigg)\\\\\\\\ &= \\phi(z)\\big(1-\\phi(z)\\big) \\end{align} Now, we re-substitute \\frac{\\partial}{\\partial z} \\phi(z) = \\phi(z) \\big(1 - \\phi(z)\\big) back into the log-likelihood partial derivative equation and obtain the equation shown below: \\begin{align} & \\bigg(y \\frac{1}{\\phi{(z)}} - (1 - y) \\frac{1}{1 - \\phi(z)} \\bigg) \\frac{\\partial}{\\partial w_j} \\phi(z) \\\\\\\\ &= \\bigg(y \\frac{1}{\\phi{(z)}} - (1 - y) \\frac{1}{1 - \\phi(z)} \\bigg) \\phi(z) \\big(1 - \\phi(z)\\big) \\frac{\\partial}{\\partial w_j}z\\\\\\\\ &= \\big(y\\big(1-\\phi(z)\\big) - (1 - y) \\phi(z)\\big)x_j\\\\\\\\ &=\\big(y - \\phi(z)\\big)x_j \\end{align} Now, in order to find the weights of the model, we take a step proportional to the positive direction of the gradient to maximize the log-likelihood. Furthermore, we add a coefficient, the learning rate \\eta , to the weight update: \\begin{align} & w_j := w_j + \\eta \\frac{\\partial}{\\partial w_j} l(\\mathbf{w})\\\\\\\\ & w_j := w_j + \\eta \\sum^{n}_{i=1} \\big( y^{(i)} - \\phi\\big(z^{(i)}\\big)\\big)x_j^{(i)} \\end{align} Note that the gradient (and weight update) is computed from all samples in the training set in gradient ascent/descent, in contrast to stochastic gradient ascent/descent. For more information about the differences between gradient descent and stochastic gradient descent, please see the related article Gradient Descent and Stochastic Gradient Descent . The previous equation shows the weight update for a single weight j . In gradient-based optimization, all weight coefficients are updated simultaneously; the weight update can be written more compactly as \\mathbf{w} := \\mathbf{w} + \\Delta\\mathbf{w}, where \\Delta{\\mathbf{w}} = \\eta \\nabla l(\\mathbf{w}) Gradient Descent and the logistic cost function In the previous section, we derived the gradient of the log-likelihood function, which can be optimized via gradient ascent. Similarly, we can obtain the cost gradient of the logistic cost function J(\\cdot) and minimize it via gradient descent in order to learn the logistic regression model. The update rule for a single weight: \\begin{align} & \\Delta{w_j} = -\\eta \\frac{\\partial J}{\\partial w_j} \\\\ & = \\eta \\sum_{i=1}^{n}\\big(y^{(i)} - \\phi\\big(z^{(i)}\\big)\\big) x_j^{(i)} \\end{align} The simultaneous weight update: \\mathbf{w} := \\mathbf{w} + \\Delta\\mathbf{w} where \\Delta{\\mathbf{w}} = - \\eta \\nabla J(\\mathbf{w}). Shuffling Random shuffling is implemented as: for one or more epochs randomly shuffle samples in the training set for training sample i compute gradients and perform weight updates Regularization As a way to tackle overfitting, we can add additional bias to the logistic regression model via a regularization term.
Via the L2 regularization term, we reduce the complexity of the model by penalizing large weight coefficients: L2: \\frac{\\lambda}{2}\\lVert \\mathbf{w} \\lVert_2 = \\frac{\\lambda}{2} \\sum_{j=1}^{m} w_j^2 In order to apply regularization, we just need to add the regularization term to the cost function that we defined for logistic regression to shrink the weights: J(\\mathbf{w}) = \\sum_{i=1}^{m} \\Bigg[ - y^{(i)} \\log \\bigg( \\phi\\big(z^{(i)}\\big) \\bigg) - \\big(1 - y^{(i)}\\big) \\log\\bigg(1-\\phi\\big(z^{(i)}\\big)\\bigg) \\Bigg] + \\frac{\\lambda}{2} \\sum_{j=1}^{m} w_j^2 The update rule for a single weight: \\begin{align} & \\Delta{w_j} = -\\eta \\bigg( \\frac{\\partial J}{\\partial w_j} + \\lambda w_j\\bigg)\\\\ & = \\eta \\sum_{i=1}^{n}\\big(y^{(i)} - \\phi\\big(z^{(i)}\\big)\\big) x_j^{(i)} - \\eta \\lambda w_j \\end{align} The simultaneous weight update: \\mathbf{w} := \\mathbf{w} + \\Delta\\mathbf{w} where \\Delta{\\mathbf{w}} = - \\eta \\big( \\nabla J(\\mathbf{w}) + \\lambda \\mathbf{w}\\big). For more information on regularization, please see Regularization of Generalized Linear Models . References Bishop, Christopher M. Pattern recognition and machine learning . Springer, 2006. pp. 203-213. Example 1 - Gradient Descent from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import LogisticRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = LogisticRegression(eta=0.1, l2_lambda=0.0, epochs=100, minibatches=1, # for Gradient Descent random_seed=1, print_progress=3) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Logistic Regression - Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 100/100 | Cost 0.32 | Elapsed: 0:00:00 | ETA: 0:00:00 Predicting Class Labels y_pred = lr.predict(X) print('Last 3 Class Labels: %s' % y_pred[-3:]) Last 3 Class Labels: [1 1 1] Predicting Class Probabilities y_pred = lr.predict_proba(X) print('Last 3 Class Probabilities: %s' % y_pred[-3:]) Last 3 Class Probabilities: [ 0.99997968 0.99339873 0.99992707] Example 2 - Stochastic Gradient Descent from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import LogisticRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = LogisticRegression(eta=0.5, epochs=30, l2_lambda=0.0, minibatches=len(y), # for SGD learning random_seed=1, print_progress=3) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Logistic Regression - Stochastic Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 30/30 | Cost 0.27 | Elapsed: 0:00:00 | ETA: 0:00:00 Example 3 - Stochastic Gradient Descent w. Minibatches Here, we set minibatches to 5, which will result in Minibatch Learning with a batch size of 20 samples (since 100 Iris samples divided by 5 minibatches equals 20).
from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import LogisticRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = LogisticRegression(eta=0.5, epochs=30, l2_lambda=0.0, minibatches=5, # 100/5 = 20 -> minibatch size random_seed=1, print_progress=3) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Logistic Regression - Stochastic Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 30/30 | Cost 0.25 | Elapsed: 0:00:00 | ETA: 0:00:00 API LogisticRegression(eta=0.01, epochs=50, l2_lambda=0.0, minibatches=1, random_seed=None, print_progress=0) Logistic regression classifier. Note that this implementation of Logistic Regression expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2_lambda : float Regularization parameter for L2 regularization. No regularization if l2_lambda=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats with cross_entropy cost (sgd or gd) for every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/LogisticRegression/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class 1 probability : float score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features.
y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Logistic Regression"},{"location":"user_guide/classifier/LogisticRegression/#logistic-regression","text":"A logistic regression class for binary classification tasks. from mlxtend.classifier import LogisticRegression","title":"Logistic Regression"},{"location":"user_guide/classifier/LogisticRegression/#overview","text":"Related to the Perceptron and Adaline , a Logistic Regression model is a linear model for binary classification. However, instead of using the linear activation function (and minimizing the sum of squared errors, SSE) as in Adaline, the net input is passed through a sigmoid activation, i.e., the logistic function: \\phi(z) = \\frac{1}{1 + e^{-z}}, where z is defined as the net input z = w_0x_0 + w_1x_1 + ... + w_mx_m = \\sum_{j=0}^{m} w_j x_j= \\mathbf{w}^T\\mathbf{x}. The net input is in turn based on the logit function logit(p(y=1 \\mid \\mathbf{x})) = z. Here, p(y=1 \\mid \\mathbf{x}) is the conditional probability that a particular sample belongs to class 1 given its features \\mathbf{x} . The logit function takes inputs in the range [0, 1] and transforms them to values over the entire real number range. In contrast, the logistic function takes input values over the entire real number range and transforms them to values in the range [0, 1]. In other words, the logistic function is the inverse of the logit function, and it lets us predict the conditional probability that a certain sample belongs to class 1 (or class 0). After model fitting, the conditional probability p(y=1 \\mid \\mathbf{x}) is converted to a binary class label via a threshold function g(\\cdot) : $$y = g({z}) = \\begin{cases} 1 & \\text{if $\\phi(z) \\ge 0.5$}\\\\ 0 & \\text{otherwise.} \\end{cases} $$ or equivalently: $$y = g({z}) = \\begin{cases} 1 & \\text{if z $\\ge$ 0}\\\\ 0 & \\text{otherwise}. \\end{cases} $$","title":"Overview"},{"location":"user_guide/classifier/LogisticRegression/#objective-function-log-likelihood","text":"In order to parameterize a logistic regression model, we maximize the likelihood L(\\cdot) (or minimize the logistic cost function). We write the likelihood as L(\\mathbf{w}) = P(\\mathbf{y} \\mid \\mathbf{x};\\mathbf{w}) = \\prod_{i=1}^{n} P\\big(y^{(i)} \\mid x^{(i)}; \\mathbf{w}\\big) = \\prod^{n}_{i=1}\\bigg(\\phi\\big(z^{(i)}\\big)\\bigg)^{y^{(i)}} \\bigg(1-\\phi\\big(z^{(i)}\\big)\\bigg)^{1-y^{(i)}}, under the assumption that the training samples are independent of each other. In practice, it is easier to maximize the (natural) log of this equation, which is called the log-likelihood function: l(\\mathbf{w}) = \\log L(\\mathbf{w}) = \\sum^{n}_{i=1} y^{(i)} \\log \\bigg(\\phi\\big(z^{(i)}\\big)\\bigg) + \\big( 1 - y^{(i)}\\big) \\log \\big(1-\\phi\\big(z^{(i)}\\big)\\big) One advantage of taking the log is to avoid numeric underflow (and challenges with floating point math) for very small likelihoods.
Another advantage is that we can obtain the derivative more easily, using the addition trick to rewrite the product of factors as a summation term, which we can then maximize using optimization algorithms such as gradient ascent.","title":"Objective Function -- Log-Likelihood"},{"location":"user_guide/classifier/LogisticRegression/#objective-function-logistic-cost-function","text":"As an alternative to maximizing the log-likelihood, we can define a cost function J(\\cdot) to be minimized; we rewrite the log-likelihood as: J(\\mathbf{w}) = \\sum_{i=1}^{m} - y^{(i)} \\log \\bigg( \\phi\\big(z^{(i)}\\big) \\bigg) - \\big(1 - y^{(i)}\\big) \\log\\bigg(1-\\phi\\big(z^{(i)}\\big)\\bigg) $$ J\\big(\\phi(z), y; \\mathbf{w}\\big) = \\begin{cases} -\\log\\big(\\phi(z) \\big) & \\text{if $y = 1$}\\\\ -\\log\\big(1- \\phi(z) \\big) & \\text{if $y = 0$} \\end{cases} $$ As we can see in the figure above, we penalize wrong predictions with an increasingly larger cost.","title":"Objective Function -- Logistic Cost Function"},{"location":"user_guide/classifier/LogisticRegression/#gradient-descent-gd-and-stochastic-gradient-descent-sgd-optimization","text":"","title":"Gradient Descent (GD) and Stochastic Gradient Descent (SGD) Optimization"},{"location":"user_guide/classifier/LogisticRegression/#gradient-ascent-and-the-log-likelihood","text":"To learn the weight coefficients of a logistic regression model via gradient-based optimization, we compute the partial derivative of the log-likelihood function -- w.r.t. the j th weight -- as follows: \\frac{\\partial}{\\partial w_j} l(\\mathbf{w}) = \\bigg(y \\frac{1}{\\phi(z)} - (1-y) \\frac{1}{1-\\phi{(z)}} \\bigg) \\frac{\\partial}{\\partial w_j}\\phi(z) As an intermediate step, we compute the partial derivative of the sigmoid function, which will come in handy later: \\begin{align} &\\frac{\\partial}{\\partial z} \\phi(z) = \\frac{\\partial}{{\\partial z}} \\frac{1}{1+e^{-z}} \\\\\\\\ &= \\frac{1}{(1 + e^{-z})^{2}} e^{-z}\\\\\\\\ &= \\frac{1}{1+e^{-z}} \\bigg(1 - \\frac{1}{1+e^{-z}} \\bigg)\\\\\\\\ &= \\phi(z)\\big(1-\\phi(z)\\big) \\end{align} Now, we re-substitute \\frac{\\partial}{\\partial z} \\phi(z) = \\phi(z) \\big(1 - \\phi(z)\\big) back into the log-likelihood partial derivative equation and obtain the equation shown below: \\begin{align} & \\bigg(y \\frac{1}{\\phi{(z)}} - (1 - y) \\frac{1}{1 - \\phi(z)} \\bigg) \\frac{\\partial}{\\partial w_j} \\phi(z) \\\\\\\\ &= \\bigg(y \\frac{1}{\\phi{(z)}} - (1 - y) \\frac{1}{1 - \\phi(z)} \\bigg) \\phi(z) \\big(1 - \\phi(z)\\big) \\frac{\\partial}{\\partial w_j}z\\\\\\\\ &= \\big(y\\big(1-\\phi(z)\\big) - (1 - y) \\phi(z)\\big)x_j\\\\\\\\ &=\\big(y - \\phi(z)\\big)x_j \\end{align} Now, in order to find the weights of the model, we take a step proportional to the positive direction of the gradient to maximize the log-likelihood. Furthermore, we add a coefficient, the learning rate \\eta , to the weight update: \\begin{align} & w_j := w_j + \\eta \\frac{\\partial}{\\partial w_j} l(\\mathbf{w})\\\\\\\\ & w_j := w_j + \\eta \\sum^{n}_{i=1} \\big( y^{(i)} - \\phi\\big(z^{(i)}\\big)\\big)x_j^{(i)} \\end{align} Note that the gradient (and weight update) is computed from all samples in the training set in gradient ascent/descent, in contrast to stochastic gradient ascent/descent. For more information about the differences between gradient descent and stochastic gradient descent, please see the related article Gradient Descent and Stochastic Gradient Descent . The previous equation shows the weight update for a single weight j .
In gradient-based optimization, all weight coefficients are updated simultaneously; the weight update can be written more compactly as \\mathbf{w} := \\mathbf{w} + \\Delta\\mathbf{w}, where \\Delta{\\mathbf{w}} = \\eta \\nabla l(\\mathbf{w})","title":"Gradient Ascent and the log-likelihood"},{"location":"user_guide/classifier/LogisticRegression/#gradient-descent-and-the-logistic-cost-function","text":"In the previous section, we derived the gradient of the log-likelihood function, which can be optimized via gradient ascent. Similarly, we can obtain the cost gradient of the logistic cost function J(\\cdot) and minimize it via gradient descent in order to learn the logistic regression model. The update rule for a single weight: \\begin{align} & \\Delta{w_j} = -\\eta \\frac{\\partial J}{\\partial w_j} \\\\ & = \\eta \\sum_{i=1}^{n}\\big(y^{(i)} - \\phi\\big(z^{(i)}\\big)\\big) x_j^{(i)} \\end{align} The simultaneous weight update: \\mathbf{w} := \\mathbf{w} + \\Delta\\mathbf{w} where \\Delta{\\mathbf{w}} = - \\eta \\nabla J(\\mathbf{w}).","title":"Gradient Descent and the logistic cost function"},{"location":"user_guide/classifier/LogisticRegression/#shuffling","text":"Random shuffling is implemented as: for one or more epochs randomly shuffle samples in the training set for training sample i compute gradients and perform weight updates","title":"Shuffling"},{"location":"user_guide/classifier/LogisticRegression/#regularization","text":"As a way to tackle overfitting, we can add additional bias to the logistic regression model via a regularization term. Via the L2 regularization term, we reduce the complexity of the model by penalizing large weight coefficients: L2: \\frac{\\lambda}{2}\\lVert \\mathbf{w} \\lVert_2 = \\frac{\\lambda}{2} \\sum_{j=1}^{m} w_j^2 In order to apply regularization, we just need to add the regularization term to the cost function that we defined for logistic regression to shrink the weights: J(\\mathbf{w}) = \\sum_{i=1}^{m} \\Bigg[ - y^{(i)} \\log \\bigg( \\phi\\big(z^{(i)}\\big) \\bigg) - \\big(1 - y^{(i)}\\big) \\log\\bigg(1-\\phi\\big(z^{(i)}\\big)\\bigg) \\Bigg] + \\frac{\\lambda}{2} \\sum_{j=1}^{m} w_j^2 The update rule for a single weight: \\begin{align} & \\Delta{w_j} = -\\eta \\bigg( \\frac{\\partial J}{\\partial w_j} + \\lambda w_j\\bigg)\\\\ & = \\eta \\sum_{i=1}^{n}\\big(y^{(i)} - \\phi\\big(z^{(i)}\\big)\\big) x_j^{(i)} - \\eta \\lambda w_j \\end{align} The simultaneous weight update: \\mathbf{w} := \\mathbf{w} + \\Delta\\mathbf{w} where \\Delta{\\mathbf{w}} = - \\eta \\big( \\nabla J(\\mathbf{w}) + \\lambda \\mathbf{w}\\big). For more information on regularization, please see Regularization of Generalized Linear Models .","title":"Regularization"},{"location":"user_guide/classifier/LogisticRegression/#references","text":"Bishop, Christopher M. Pattern recognition and machine learning . Springer, 2006. pp. 203-213.","title":"References"},
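To make the update rules above concrete, here is a small NumPy sketch of one full-batch gradient step on the (unregularized) logistic cost, with a separate bias unit analogous to mlxtend's w_ and b_ attributes; the toy data is made up for illustration and is not part of the original examples:

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# Toy data: 4 samples, 2 features, binary labels in {0, 1}
X = np.array([[0.5, 1.0],
              [1.5, 2.0],
              [-1.0, -1.5],
              [-2.0, -0.5]])
y = np.array([1, 1, 0, 0])

w, b, eta = np.zeros(X.shape[1]), 0.0, 0.1

# One full-batch step: Delta w_j = eta * sum_i (y_i - phi(z_i)) * x_ij
errors = y - sigmoid(X.dot(w) + b)
w += eta * X.T.dot(errors)
b += eta * errors.sum()
print(w, b)  # weights and bias after a single epoch
```

Repeating this step for several epochs is exactly what the gradient descent examples below do internally.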
203-213","title":"References"},{"location":"user_guide/classifier/LogisticRegression/#example-1-gradient-descent","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import LogisticRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = LogisticRegression(eta=0.1, l2_lambda=0.0, epochs=100, minibatches=1, # for Gradient Descent random_seed=1, print_progress=3) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Logistic Regression - Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 100/100 | Cost 0.32 | Elapsed: 0:00:00 | ETA: 0:00:00","title":"Example 1 - Gradient Descent"},{"location":"user_guide/classifier/LogisticRegression/#predicting-class-labels","text":"y_pred = lr.predict(X) print('Last 3 Class Labels: %s' % y_pred[-3:]) Last 3 Class Labels: [1 1 1]","title":"Predicting Class Labels"},{"location":"user_guide/classifier/LogisticRegression/#predicting-class-probabilities","text":"y_pred = lr.predict_proba(X) print('Last 3 Class Labels: %s' % y_pred[-3:]) Last 3 Class Labels: [ 0.99997968 0.99339873 0.99992707]","title":"Predicting Class Probabilities"},{"location":"user_guide/classifier/LogisticRegression/#example-2-stochastic-gradient-descent","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import LogisticRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = LogisticRegression(eta=0.5, epochs=30, l2_lambda=0.0, minibatches=len(y), # for SGD learning random_seed=1, print_progress=3) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Logistic Regression - Stochastic Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 30/30 | Cost 0.27 | Elapsed: 0:00:00 | ETA: 0:00:00","title":"Example 2 - Stochastic Gradient Descent"},{"location":"user_guide/classifier/LogisticRegression/#example-3-stochastic-gradient-descent-w-minibatches","text":"Here, we set minibatches to 5, which will result in Minibatch Learning with a batch size of 20 samples (since 100 Iris samples divided by 5 minibatches equals 20). 
from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import LogisticRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = LogisticRegression(eta=0.5, epochs=30, l2_lambda=0.0, minibatches=5, # 100/5 = 20 -> minibatch size random_seed=1, print_progress=3) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Logistic Regression - Stochastic Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 30/30 | Cost 0.25 | Elapsed: 0:00:00 | ETA: 0:00:00","title":"Example 3 - Stochastic Gradient Descent w. Minibatches"},{"location":"user_guide/classifier/LogisticRegression/#api","text":"LogisticRegression(eta=0.01, epochs=50, l2_lambda=0.0, minibatches=1, random_seed=None, print_progress=0) Logistic regression classifier. Note that this implementation of Logistic Regression expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2_lambda : float Regularization parameter for L2 regularization. No regularization if l2_lambda=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats with cross_entropy cost (sgd or gd) for every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/LogisticRegression/","title":"API"},{"location":"user_guide/classifier/LogisticRegression/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features.
Returns Class 1 probability : float score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Methods"},{"location":"user_guide/classifier/MultiLayerPerceptron/","text":"Neural Network - Multilayer Perceptron Implementation of a multilayer perceptron, a feedforward artificial neural network. from mlxtend.classifier import MultiLayerPerceptron Overview Although the code is fully working and can be used for common classification tasks, this implementation is not geared towards efficiency but clarity \u2013 the original code was written for demonstration purposes. Basic Architecture The neurons x_0 and a_0 represent the bias units ( x_0=1 , a_0=1 ). The i th superscript denotes the i th layer, and the j th subscript stands for the index of the respective unit. For example, a_{1}^{(2)} refers to the first activation unit after the bias unit (i.e., 2nd activation unit) in the 2nd layer (here: the hidden layer) \\begin{align} \\mathbf{a^{(2)}} &= \\begin{bmatrix} a_{0}^{(2)} \\\\ a_{1}^{(2)} \\\\ \\vdots \\\\ a_{m}^{(2)} \\end{bmatrix}. \\end{align} Each layer (l) in a multi-layer perceptron, a directed graph, is fully connected to the next layer (l+1) . We write the weight coefficient that connects the k th unit in the l th layer to the j th unit in layer l+1 as w^{(l)}_{j, k} . For example, the weight coefficient that connects the units a_0^{(2)} \\rightarrow a_1^{(3)} would be written as w_{1,0}^{(2)} . Activation In the current implementation, the activations of the hidden layer(s) are computed via the logistic (sigmoid) function \\phi(z) = \\frac{1}{1 + e^{-z}}. (For more details on the logistic function, please see classifier.LogisticRegression ; a general overview of different activation functions can be found here .) Furthermore, the MLP uses the softmax function in the output layer. For more details on the softmax function, please see classifier.SoftmaxRegression . References D. E. Rumelhart, G. E. Hinton, and R. J. Williams. Learning representations by back-propagating errors . Nature, 323, pages 533\u2013536, 1986. C. M. Bishop. Neural networks for pattern recognition . Oxford University Press, 1995. T. Hastie, J. Friedman, and R. Tibshirani. The Elements of Statistical Learning , Volume 2. Springer, 2009. Example 1 - Classifying Iris Flowers Load 2 features from Iris (sepal length and petal width) for visualization purposes: from mlxtend.data import iris_data X, y = iris_data() X = X[:, [0, 3]] # standardize training data X_std = (X - X.mean(axis=0)) / X.std(axis=0) Train the neural network for 3 output flower classes ('Setosa', 'Versicolor', 'Virginica'), regular gradient descent ( minibatches=1 ), 50 hidden units, and no regularization. Gradient Descent Setting the minibatches to 1 will result in gradient descent training; please see Gradient Descent vs. Stochastic Gradient Descent for details.
from mlxtend.classifier import MultiLayerPerceptron as MLP nn1 = MLP(hidden_layers=[50], l2=0.00, l1=0.0, epochs=150, eta=0.05, momentum=0.1, decrease_const=0.0, minibatches=1, random_seed=1, print_progress=3) nn1 = nn1.fit(X_std, y) Iteration: 150/150 | Cost 0.06 | Elapsed: 0:00:00 | ETA: 0:00:00 from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt fig = plot_decision_regions(X=X_std, y=y, clf=nn1, legend=2) plt.title('Multi-layer Perceptron w. 1 hidden layer (logistic sigmoid)') plt.show() import matplotlib.pyplot as plt plt.plot(range(len(nn1.cost_)), nn1.cost_) plt.ylabel('Cost') plt.xlabel('Epochs') plt.show() print('Accuracy: %.2f%%' % (100 * nn1.score(X_std, y))) Accuracy: 96.67% Stochastic Gradient Descent Setting minibatches to n_samples will result in stochastic gradient descent training; please see Gradient Descent vs. Stochastic Gradient Descent for details. nn2 = MLP(hidden_layers=[50], l2=0.00, l1=0.0, epochs=5, eta=0.005, momentum=0.1, decrease_const=0.0, minibatches=len(y), random_seed=1, print_progress=3) nn2.fit(X_std, y) plt.plot(range(len(nn2.cost_)), nn2.cost_) plt.ylabel('Cost') plt.xlabel('Epochs') plt.show() Iteration: 5/5 | Cost 0.11 | Elapsed: 00:00:00 | ETA: 00:00:00 Continue the training for 25 epochs... nn2.epochs = 25 nn2 = nn2.fit(X_std, y) Iteration: 25/25 | Cost 0.07 | Elapsed: 0:00:00 | ETA: 0:00:00 plt.plot(range(len(nn2.cost_)), nn2.cost_) plt.ylabel('Cost') plt.xlabel('Epochs') plt.show() Example 2 - Classifying Handwritten Digits from a 10% MNIST Subset Load a 5000-sample subset of the MNIST dataset (please see data.loadlocal_mnist if you want to download and read in the complete MNIST dataset). from mlxtend.data import mnist_data from mlxtend.preprocessing import shuffle_arrays_unison X, y = mnist_data() X, y = shuffle_arrays_unison((X, y), random_seed=1) X_train, y_train = X[:500], y[:500] X_test, y_test = X[500:], y[500:] Visualize a sample from the MNIST dataset to check if it was loaded correctly: import matplotlib.pyplot as plt def plot_digit(X, y, idx): img = X[idx].reshape(28,28) plt.imshow(img, cmap='Greys', interpolation='nearest') plt.title('true label: %d' % y[idx]) plt.show() plot_digit(X, y, 3500) Standardize pixel values: import numpy as np from mlxtend.preprocessing import standardize X_train_std, params = standardize(X_train, columns=range(X_train.shape[1]), return_params=True) X_test_std = standardize(X_test, columns=range(X_test.shape[1]), params=params) Initialize the neural network to recognize the 10 different digits (0-9) using 100 epochs and mini-batch learning. nn1 = MLP(hidden_layers=[150], l2=0.00, l1=0.0, epochs=100, eta=0.005, momentum=0.0, decrease_const=0.0, minibatches=100, random_seed=1, print_progress=3) Learn the features while printing the progress to get an idea about how long it may take. import matplotlib.pyplot as plt nn1.fit(X_train_std, y_train) plt.plot(range(len(nn1.cost_)), nn1.cost_) plt.ylabel('Cost') plt.xlabel('Epochs') plt.show() Iteration: 100/100 | Cost 0.01 | Elapsed: 0:00:17 | ETA: 0:00:00 print('Train Accuracy: %.2f%%' % (100 * nn1.score(X_train_std, y_train))) print('Test Accuracy: %.2f%%' % (100 * nn1.score(X_test_std, y_test))) Train Accuracy: 100.00% Test Accuracy: 84.62% Please note that this neural network has been trained on only 10% of the MNIST data for technical demonstration purposes, hence, the lousy predictive performance.
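The continued-training pattern used in the SGD example above (increasing nn2.epochs and calling fit again) can also be expressed with the init_params flag documented in the Methods section below; a minimal sketch, assuming the warm-start behavior described there (the parameter values are illustrative):

from mlxtend.classifier import MultiLayerPerceptron as MLP
from mlxtend.data import iris_data

X, y = iris_data()
X_std = (X - X.mean(axis=0)) / X.std(axis=0)  # standardize all four features

mlp = MLP(hidden_layers=[50], epochs=20, eta=0.05, random_seed=1)
mlp.fit(X_std, y)                     # first 20 epochs, weights freshly initialized
mlp.fit(X_std, y, init_params=False)  # 20 more epochs, continuing from the learned weights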
API MultiLayerPerceptron(eta=0.5, epochs=50, hidden_layers=[50], n_classes=None, momentum=0.0, l1=0.0, l2=0.0, dropout=1.0, decrease_const=0.0, minibatches=1, random_seed=None, print_progress=0) Multi-layer perceptron classifier with logistic sigmoid activations Parameters eta : float (default: 0.5) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. hidden_layers : list (default: [50]) Number of units per hidden layer. By default 50 units in the first hidden layer. At the moment only 1 hidden layer is supported n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. l1 : float (default: 0.0) L1 regularization strength l2 : float (default: 0.0) L2 regularization strength momentum : float (default: 0.0) Momentum constant. Factor multiplied with the gradient of the previous epoch t-1 to improve learning speed w(t) := w(t) - (grad(t) + momentum * grad(t-1)) decrease_const : float (default: 0.0) Decrease constant. Shrinks the learning rate after each epoch via eta / (1 + epoch*decrease_const) minibatches : int (default: 1) Divide the training data into k minibatches for accelerated stochastic gradient descent learning. Gradient Descent Learning if minibatches = 1 Stochastic Gradient Descent learning if minibatches = len(y) Minibatch learning if minibatches > 1 random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape=[n_features, n_classes] Weights after fitting. b_ : 1D-array, shape=[n_classes] Bias units after fitting. cost_ : list List of floats; the mean categorical cross entropy cost after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/MultiLayerPerceptron/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilties : array-like, shape= [n_samples, n_classes] score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. 
y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Neural Network - Multilayer Perceptron"},{"location":"user_guide/classifier/MultiLayerPerceptron/#neural-network-multilayer-perceptron","text":"Implementation of a multilayer perceptron, a feedforward artificial neural network. from mlxtend.classifier import MultiLayerPerceptron","title":"Neural Network - Multilayer Perceptron"},{"location":"user_guide/classifier/MultiLayerPerceptron/#overview","text":"Although the code is fully working and can be used for common classification tasks, this implementation is not geared towards efficiency but clarity \u2013 the original code was written for demonstration purposes.","title":"Overview"},{"location":"user_guide/classifier/MultiLayerPerceptron/#basic-architecture","text":"The neurons x_0 and a_0 represent the bias units ( x_0=1 , a_0=1 ). The i th superscript denotes the i th layer, and the j th subscript stands for the index of the respective unit. For example, a_{1}^{(2)} refers to the first activation unit after the bias unit (i.e., 2nd activation unit) in the 2nd layer (here: the hidden layer) \\begin{align} \\mathbf{a^{(2)}} &= \\begin{bmatrix} a_{0}^{(2)} \\\\ a_{1}^{(2)} \\\\ \\vdots \\\\ a_{m}^{(2)} \\end{bmatrix}. \\end{align} Each layer (l) in a multi-layer perceptron, a directed graph, is fully connected to the next layer (l+1) . We write the weight coefficient that connects the k th unit in the l th layer to the j th unit in layer l+1 as w^{(l)}_{j, k} . For example, the weight coefficient that connects the units a_0^{(2)} \\rightarrow a_1^{(3)} would be written as w_{1,0}^{(2)} .","title":"Basic Architecture"},{"location":"user_guide/classifier/MultiLayerPerceptron/#activation","text":"In the current implementation, the activations of the hidden layer(s) are computed via the logistic (sigmoid) function \\phi(z) = \\frac{1}{1 + e^{-z}}. (For more details on the logistic function, please see classifier.LogisticRegression ; a general overview of different activation functions can be found here .) Furthermore, the MLP uses the softmax function in the output layer; for more details on the softmax function, please see classifier.SoftmaxRegression .","title":"Activation"},{"location":"user_guide/classifier/MultiLayerPerceptron/#references","text":"D. E. Rumelhart, G. E. Hinton, and R. J. Williams. Learning representations by back-propagating errors . Nature, 323, pages 533\u2013536, 1986. C. M. Bishop. Neural networks for pattern recognition . Oxford University Press, 1995. T. Hastie, J. Friedman, and R. Tibshirani. The Elements of Statistical Learning , Volume 2. Springer, 2009.","title":"References"},{"location":"user_guide/classifier/MultiLayerPerceptron/#example-1-classifying-iris-flowers","text":"Load 2 features from Iris (sepal length and petal width) for visualization purposes: from mlxtend.data import iris_data X, y = iris_data() X = X[:, [0, 3]] # standardize training data X_std = (X - X.mean(axis=0)) / X.std(axis=0) Train a neural network for the 3 output flower classes ('Setosa', 'Versicolor', 'Virginica') using regular gradient descent ( minibatches=1 ), 50 hidden units, and no regularization.","title":"Example 1 - Classifying Iris Flowers"},{"location":"user_guide/classifier/MultiLayerPerceptron/#gradient-descent","text":"Setting the minibatches to 1 will result in gradient descent training; please see Gradient Descent vs. Stochastic Gradient Descent for details.
from mlxtend.classifier import MultiLayerPerceptron as MLP nn1 = MLP(hidden_layers=[50], l2=0.00, l1=0.0, epochs=150, eta=0.05, momentum=0.1, decrease_const=0.0, minibatches=1, random_seed=1, print_progress=3) nn1 = nn1.fit(X_std, y) Iteration: 150/150 | Cost 0.06 | Elapsed: 0:00:00 | ETA: 0:00:00 from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt fig = plot_decision_regions(X=X_std, y=y, clf=nn1, legend=2) plt.title('Multi-layer Perceptron w. 1 hidden layer (logistic sigmoid)') plt.show() import matplotlib.pyplot as plt plt.plot(range(len(nn1.cost_)), nn1.cost_) plt.ylabel('Cost') plt.xlabel('Epochs') plt.show() print('Accuracy: %.2f%%' % (100 * nn1.score(X_std, y))) Accuracy: 96.67%","title":"Gradient Descent"},{"location":"user_guide/classifier/MultiLayerPerceptron/#stochastic-gradient-descent","text":"Setting minibatches to n_samples will result in stochastic gradient descent training; please see Gradient Descent vs. Stochastic Gradient Descent for details. nn2 = MLP(hidden_layers=[50], l2=0.00, l1=0.0, epochs=5, eta=0.005, momentum=0.1, decrease_const=0.0, minibatches=len(y), random_seed=1, print_progress=3) nn2.fit(X_std, y) plt.plot(range(len(nn2.cost_)), nn2.cost_) plt.ylabel('Cost') plt.xlabel('Epochs') plt.show() Iteration: 5/5 | Cost 0.11 | Elapsed: 00:00:00 | ETA: 00:00:00 Continue the training for 25 epochs... nn2.epochs = 25 nn2 = nn2.fit(X_std, y) Iteration: 25/25 | Cost 0.07 | Elapsed: 0:00:00 | ETA: 0:00:00 plt.plot(range(len(nn2.cost_)), nn2.cost_) plt.ylabel('Cost') plt.xlabel('Epochs') plt.show()","title":"Stochastic Gradient Descent"},{"location":"user_guide/classifier/MultiLayerPerceptron/#example-2-classifying-handwritten-digits-from-a-10-mnist-subset","text":"Load a 5000-sample subset of the MNIST dataset (please see data.loadlocal_mnist if you want to download and read in the complete MNIST dataset). from mlxtend.data import mnist_data from mlxtend.preprocessing import shuffle_arrays_unison X, y = mnist_data() X, y = shuffle_arrays_unison((X, y), random_seed=1) X_train, y_train = X[:500], y[:500] X_test, y_test = X[500:], y[500:] Visualize a sample from the MNIST dataset to check if it was loaded correctly: import matplotlib.pyplot as plt def plot_digit(X, y, idx): img = X[idx].reshape(28,28) plt.imshow(img, cmap='Greys', interpolation='nearest') plt.title('true label: %d' % y[idx]) plt.show() plot_digit(X, y, 3500) Standardize pixel values: import numpy as np from mlxtend.preprocessing import standardize X_train_std, params = standardize(X_train, columns=range(X_train.shape[1]), return_params=True) X_test_std = standardize(X_test, columns=range(X_test.shape[1]), params=params) Initialize the neural network to recognize the 10 different digits (0-9) using 100 epochs and mini-batch learning. nn1 = MLP(hidden_layers=[150], l2=0.00, l1=0.0, epochs=100, eta=0.005, momentum=0.0, decrease_const=0.0, minibatches=100, random_seed=1, print_progress=3) Learn the features while printing the progress to get an idea about how long it may take.
import matplotlib.pyplot as plt nn1.fit(X_train_std, y_train) plt.plot(range(len(nn1.cost_)), nn1.cost_) plt.ylabel('Cost') plt.xlabel('Epochs') plt.show() Iteration: 100/100 | Cost 0.01 | Elapsed: 0:00:17 | ETA: 0:00:00 print('Train Accuracy: %.2f%%' % (100 * nn1.score(X_train_std, y_train))) print('Test Accuracy: %.2f%%' % (100 * nn1.score(X_test_std, y_test))) Train Accuracy: 100.00% Test Accuracy: 84.62% Please note that this neural network has been trained on only 10% of the MNIST data for technical demonstration purposes, hence, the lousy predictive performance.","title":"Example 2 - Classifying Handwritten Digits from a 10% MNIST Subset"},{"location":"user_guide/classifier/MultiLayerPerceptron/#api","text":"MultiLayerPerceptron(eta=0.5, epochs=50, hidden_layers=[50], n_classes=None, momentum=0.0, l1=0.0, l2=0.0, dropout=1.0, decrease_const=0.0, minibatches=1, random_seed=None, print_progress=0) Multi-layer perceptron classifier with logistic sigmoid activations Parameters eta : float (default: 0.5) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. hidden_layers : list (default: [50]) Number of units per hidden layer. By default 50 units in the first hidden layer. At the moment only 1 hidden layer is supported n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. l1 : float (default: 0.0) L1 regularization strength l2 : float (default: 0.0) L2 regularization strength momentum : float (default: 0.0) Momentum constant. Factor multiplied with the gradient of the previous epoch t-1 to improve learning speed w(t) := w(t) - (grad(t) + momentum * grad(t-1)) decrease_const : float (default: 0.0) Decrease constant. Shrinks the learning rate after each epoch via eta / (1 + epoch*decrease_const) minibatches : int (default: 1) Divide the training data into k minibatches for accelerated stochastic gradient descent learning. Gradient Descent Learning if minibatches = 1 Stochastic Gradient Descent learning if minibatches = len(y) Minibatch learning if minibatches > 1 random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape=[n_features, n_classes] Weights after fitting. b_ : 1D-array, shape=[n_classes] Bias units after fitting. cost_ : list List of floats; the mean categorical cross entropy cost after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/MultiLayerPerceptron/","title":"API"},{"location":"user_guide/classifier/MultiLayerPerceptron/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. 
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilities : array-like, shape= [n_samples, n_classes] score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Methods"},{"location":"user_guide/classifier/Perceptron/","text":"Perceptron Implementation of a Perceptron learning algorithm for classification. from mlxtend.classifier import Perceptron Overview The idea behind this \"thresholded\" perceptron was to mimic how a single neuron in the brain works: It either \"fires\" or not. A perceptron receives multiple input signals, and if the sum of the input signals exceeds a certain threshold, it returns a signal and remains \"silent\" otherwise. What made this a \"machine learning\" algorithm was Frank Rosenblatt's idea of the perceptron learning rule: The perceptron algorithm is about learning the weights for the input signals in order to draw a linear decision boundary that allows us to discriminate between the two linearly separable classes +1 and -1. Basic Notation Before we dive deeper into the algorithm(s) for learning the weights of the perceptron classifier, let us take a brief look at the basic notation. In the following sections, we will label the positive and negative class in our binary classification setting as \"1\" and \"-1\", respectively. Next, we define an activation function g(\\mathbf{z}) that takes a linear combination of the input values \\mathbf{x} and weights \\mathbf{w} as input ( \\mathbf{z} = w_1x_{1} + \\dots + w_mx_{m} ), and if g(\\mathbf{z}) is greater than a defined threshold \\theta we predict 1 and -1 otherwise; in this case, this activation function g is a simple \"unit step function,\" which is sometimes also called \"Heaviside step function.\" $$ g(z) = \\begin{cases} 1 & \\text{if $z \\ge \\theta$}\\\\ -1 & \\text{otherwise}. \\end{cases} $$ where z = w_1x_{1} + \\dots + w_mx_{m} = \\sum_{j=1}^{m} x_{j}w_{j} \\\\ = \\mathbf{w}^T\\mathbf{x} \\mathbf{w} is the weight vector, and \\mathbf{x} is an m -dimensional sample from the training dataset: \\mathbf{w} = \\begin{bmatrix} w_{1} \\\\ \\vdots \\\\ w_{m} \\end{bmatrix} \\quad \\mathbf{x} = \\begin{bmatrix} x_{1} \\\\ \\vdots \\\\ x_{m} \\end{bmatrix} In order to simplify the notation, we bring \\theta to the left side of the equation and define w_0 = -\\theta \\text{ and } x_0=1 so that $$ g({z}) = \\begin{cases} 1 & \\text{if $z \\ge 0$}\\\\ -1 & \\text{otherwise}. \\end{cases} $$ and z = w_0x_{0} + w_1x_{1} + \\dots + w_mx_{m} = \\sum_{j=0}^{m} x_{j}w_{j} \\\\ = \\mathbf{w}^T\\mathbf{x}. Perceptron Rule Rosenblatt's initial perceptron rule is fairly simple and can be summarized by the following steps: Initialize the weights to 0 or small random numbers.
For each training sample \\mathbf{x^{(i)}} : Calculate the output value. Update the weights. The output value is the class label predicted by the unit step function that we defined earlier (output =g(\\mathbf{z}) ) and the weight update can be written more formally as w_j := w_j + \\Delta w_j . The value for updating the weights at each increment is calculated by the learning rule \\Delta w_j = \\eta \\; (\\text{target}^{(i)} - \\text{output}^{(i)})\\;x^{(i)}_{j} where \\eta is the learning rate (a constant between 0.0 and 1.0), \"target\" is the true class label, and the \"output\" is the predicted class label. It is important to note that all weights in the weight vector are being updated simultaneously. Concretely, for a 2-dimensional dataset, we would write the update as: \\Delta w_0 = \\eta(\\text{target}^{(i)} - \\text{output}^{(i)}) \\Delta w_1 = \\eta(\\text{target}^{(i)} - \\text{output}^{(i)})\\;x^{(i)}_{1} \\Delta w_2 = \\eta(\\text{target}^{(i)} - \\text{output}^{(i)})\\;x^{(i)}_{2} Before we implement the perceptron rule in Python, let us make a simple thought experiment to illustrate how beautifully simple this learning rule really is. In the two scenarios where the perceptron predicts the class label correctly, the weights remain unchanged: \\Delta w_j = \\eta(-1^{(i)} - -1^{(i)})\\;x^{(i)}_{j} = 0 \\Delta w_j = \\eta(1^{(i)} - 1^{(i)})\\;x^{(i)}_{j} = 0 However, in case of a wrong prediction, the weights are being \"pushed\" towards the direction of the positive or negative target class, respectively: \\Delta w_j = \\eta(1^{(i)} - -1^{(i)})\\;x^{(i)}_{j} = \\eta(2)\\;x^{(i)}_{j} \\Delta w_j = \\eta(-1^{(i)} - 1^{(i)})\\;x^{(i)}_{j} = \\eta(-2)\\;x^{(i)}_{j} It is important to note that the convergence of the perceptron is only guaranteed if the two classes are linearly separable. If the two classes can't be separated by a linear decision boundary, we can set a maximum number of passes over the training dataset (\"epochs\") and/or a threshold for the number of tolerated misclassifications. References F. Rosenblatt. The perceptron, a perceiving and recognizing automaton Project Para. Cornell Aeronautical Laboratory, 1957. Example 1 - Classification of Iris Flowers from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Perceptron import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() # Rosenblatt Perceptron ppn = Perceptron(epochs=5, eta=0.05, random_seed=0, print_progress=3) ppn.fit(X, y) plot_decision_regions(X, y, clf=ppn) plt.title('Perceptron - Rosenblatt Perceptron Rule') plt.show() print('Bias & Weights: %s' % ppn.w_) plt.plot(range(len(ppn.cost_)), ppn.cost_) plt.xlabel('Iterations') plt.ylabel('Misclassifications') plt.show() Iteration: 5/5 | Elapsed: 00:00:00 | ETA: 00:00:00 Bias & Weights: [[-0.04500809] [ 0.11048855]] API Perceptron(eta=0.1, epochs=50, random_seed=None, print_progress=0) Perceptron classifier. Note that this implementation of the Perceptron expects binary class labels in {0, 1}. Parameters eta : float (default: 0.1) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Number of passes over the training dataset. Prior to each epoch, the dataset is shuffled to prevent cycles.
random_seed : int Random state for initializing random weights and shuffling. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Number of misclassifications in every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Perceptron/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Perceptron"},{"location":"user_guide/classifier/Perceptron/#perceptron","text":"Implementation of a Perceptron learning algorithm for classification. from mlxtend.classifier import Perceptron","title":"Perceptron"},{"location":"user_guide/classifier/Perceptron/#overview","text":"The idea behind this \"thresholded\" perceptron was to mimic how a single neuron in the brain works: It either \"fires\" or not. A perceptron receives multiple input signals, and if the sum of the input signals exceed a certain threshold it either returns a signal or remains \"silent\" otherwise. What made this a \"machine learning\" algorithm was Frank Rosenblatt's idea of the perceptron learning rule: The perceptron algorithm is about learning the weights for the input signals in order to draw linear decision boundary that allows us to discriminate between the two linearly separable classes +1 and -1.","title":"Overview"},{"location":"user_guide/classifier/Perceptron/#basic-notation","text":"Before we dive deeper into the algorithm(s) for learning the weights of the perceptron classifier, let us take a brief look at the basic notation. In the following sections, we will label the positive and negative class in our binary classification setting as \"1\" and \"-1\", respectively. Next, we define an activation function g(\\mathbf{z}) that takes a linear combination of the input values \\mathbf{x} and weights \\mathbf{w} as input ( \\mathbf{z} = w_1x_{1} + \\dots + w_mx_{m} ), and if g(\\mathbf{z}) is greater than a defined threshold \\theta we predict 1 and -1 otherwise; in this case, this activation function g is a simple \"unit step function,\" which is sometimes also called \"Heaviside step function.\" $$ g(z) = \\begin{cases} 1 & \\text{if $z \\ge \\theta$}\\\\ -1 & \\text{otherwise}. 
\\end{cases} $$ where z = w_1x_{1} + \\dots + w_mx_{m} = \\sum_{j=1}^{m} x_{j}w_{j} \\\\ = \\mathbf{w}^T\\mathbf{x} \\mathbf{w} is the weight vector, and \\mathbf{x} is an m -dimensional sample from the training dataset: \\mathbf{w} = \\begin{bmatrix} w_{1} \\\\ \\vdots \\\\ w_{m} \\end{bmatrix} \\quad \\mathbf{x} = \\begin{bmatrix} x_{1} \\\\ \\vdots \\\\ x_{m} \\end{bmatrix} In order to simplify the notation, we bring \\theta to the left side of the equation and define w_0 = -\\theta \\text{ and } x_0=1 so that $$ g({z}) = \\begin{cases} 1 & \\text{if $z \\ge 0$}\\\\ -1 & \\text{otherwise}. \\end{cases} $$ and z = w_0x_{0} + w_1x_{1} + \\dots + w_mx_{m} = \\sum_{j=0}^{m} x_{j}w_{j} \\\\ = \\mathbf{w}^T\\mathbf{x}.","title":"Basic Notation"},{"location":"user_guide/classifier/Perceptron/#perceptron-rule","text":"Rosenblatt's initial perceptron rule is fairly simple and can be summarized by the following steps: Initialize the weights to 0 or small random numbers. For each training sample \\mathbf{x^{(i)}} : Calculate the output value. Update the weights. The output value is the class label predicted by the unit step function that we defined earlier (output =g(\\mathbf{z}) ) and the weight update can be written more formally as w_j := w_j + \\Delta w_j . The value for updating the weights at each increment is calculated by the learning rule \\Delta w_j = \\eta \\; (\\text{target}^{(i)} - \\text{output}^{(i)})\\;x^{(i)}_{j} where \\eta is the learning rate (a constant between 0.0 and 1.0), \"target\" is the true class label, and the \"output\" is the predicted class label. It is important to note that all weights in the weight vector are being updated simultaneously. Concretely, for a 2-dimensional dataset, we would write the update as: \\Delta w_0 = \\eta(\\text{target}^{(i)} - \\text{output}^{(i)}) \\Delta w_1 = \\eta(\\text{target}^{(i)} - \\text{output}^{(i)})\\;x^{(i)}_{1} \\Delta w_2 = \\eta(\\text{target}^{(i)} - \\text{output}^{(i)})\\;x^{(i)}_{2} Before we implement the perceptron rule in Python, let us make a simple thought experiment to illustrate how beautifully simple this learning rule really is. In the two scenarios where the perceptron predicts the class label correctly, the weights remain unchanged: \\Delta w_j = \\eta(-1^{(i)} - -1^{(i)})\\;x^{(i)}_{j} = 0 \\Delta w_j = \\eta(1^{(i)} - 1^{(i)})\\;x^{(i)}_{j} = 0 However, in case of a wrong prediction, the weights are being \"pushed\" towards the direction of the positive or negative target class, respectively: \\Delta w_j = \\eta(1^{(i)} - -1^{(i)})\\;x^{(i)}_{j} = \\eta(2)\\;x^{(i)}_{j} \\Delta w_j = \\eta(-1^{(i)} - 1^{(i)})\\;x^{(i)}_{j} = \\eta(-2)\\;x^{(i)}_{j} It is important to note that the convergence of the perceptron is only guaranteed if the two classes are linearly separable. If the two classes can't be separated by a linear decision boundary, we can set a maximum number of passes over the training dataset (\"epochs\") and/or a threshold for the number of tolerated misclassifications.","title":"Perceptron Rule"},{"location":"user_guide/classifier/Perceptron/#references","text":"F. Rosenblatt. The perceptron, a perceiving and recognizing automaton Project Para.
Cornell Aeronautical Laboratory, 1957.","title":"References"},{"location":"user_guide/classifier/Perceptron/#example-1-classification-of-iris-flowers","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Perceptron import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() # Rosenblatt Perceptron ppn = Perceptron(epochs=5, eta=0.05, random_seed=0, print_progress=3) ppn.fit(X, y) plot_decision_regions(X, y, clf=ppn) plt.title('Perceptron - Rosenblatt Perceptron Rule') plt.show() print('Bias & Weights: %s' % ppn.w_) plt.plot(range(len(ppn.cost_)), ppn.cost_) plt.xlabel('Iterations') plt.ylabel('Misclassifications') plt.show() Iteration: 5/5 | Elapsed: 00:00:00 | ETA: 00:00:00 Bias & Weights: [[-0.04500809] [ 0.11048855]]","title":"Example 1 - Classification of Iris Flowers"},{"location":"user_guide/classifier/Perceptron/#api","text":"Perceptron(eta=0.1, epochs=50, random_seed=None, print_progress=0) Perceptron classifier. Note that this implementation of the Perceptron expects binary class labels in {0, 1}. Parameters eta : float (default: 0.1) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Number of passes over the training dataset. Prior to each epoch, the dataset is shuffled to prevent cycles. random_seed : int Random state for initializing random weights and shuffling. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Number of misclassifications in every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Perceptron/","title":"API"},{"location":"user_guide/classifier/Perceptron/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Methods"},{"location":"user_guide/classifier/SoftmaxRegression/","text":"Softmax Regression A logistic regression class for multi-class classification tasks.
from mlxtend.classifier import SoftmaxRegression Overview Softmax Regression (synonyms: Multinomial Logistic , Maximum Entropy Classifier , or just Multi-class Logistic Regression ) is a generalization of logistic regression that we can use for multi-class classification (under the assumption that the classes are mutually exclusive). In contrast, we use the (standard) Logistic Regression model in binary classification tasks. Below is a schematic of a Logistic Regression model; for more details, please see the LogisticRegression manual . In Softmax Regression (SMR), we replace the sigmoid logistic function with the so-called softmax function \\phi_{softmax}(\\cdot) . P(y=j \\mid z^{(i)}) = \\phi_{softmax}(z^{(i)}) = \\frac{e^{z_{j}^{(i)}}}{\\sum_{l=1}^{k} e^{z_{l}^{(i)}}}, where we define the net input z as z = w_1x_1 + ... + w_mx_m + b= \\sum_{l=1}^{m} w_l x_l + b= \\mathbf{w}^T\\mathbf{x} + b. ( w is the weight vector, \\mathbf{x} is the feature vector of 1 training sample, and b is the bias unit.) Now, this softmax function computes the probability that this training sample \\mathbf{x}^{(i)} belongs to class j given the weight and net input z^{(i)} . So, we compute the probability p(y = j \\mid \\mathbf{x^{(i)}; w}_j) for each class label in j = 1, \\ldots, k . Note the normalization term in the denominator which causes these class probabilities to sum up to one. To illustrate the concept of softmax, let us walk through a concrete example. Let's assume we have a training set consisting of 4 samples from 3 different classes (0, 1, and 2) x_0 \\rightarrow \\text{class }0 x_1 \\rightarrow \\text{class }1 x_2 \\rightarrow \\text{class }2 x_3 \\rightarrow \\text{class }2 import numpy as np y = np.array([0, 1, 2, 2]) First, we want to encode the class labels into a format that we can more easily work with; we apply one-hot encoding: y_enc = (np.arange(np.max(y) + 1) == y[:, None]).astype(float) print('one-hot encoding:\\n', y_enc) one-hot encoding: [[ 1. 0. 0.] [ 0. 1. 0.] [ 0. 0. 1.] [ 0. 0. 1.]] A sample that belongs to class 0 (the first row) has a 1 in the first cell, a sample that belongs to class 2 has a 1 in the third cell of its row, and so forth. Next, let us define the feature matrix of our 4 training samples. Here, we assume that our dataset consists of 2 features; thus, we create a 4x2 dimensional matrix of our samples and features. Similarly, we create a 2x3 dimensional weight matrix (one row per feature and one column for each class). X = np.array([[0.1, 0.5], [1.1, 2.3], [-1.1, -2.3], [-1.5, -2.5]]) W = np.array([[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]) bias = np.array([0.01, 0.1, 0.1]) print('Inputs X:\\n', X) print('\\nWeights W:\\n', W) print('\\nbias:\\n', bias) Inputs X: [[ 0.1 0.5] [ 1.1 2.3] [-1.1 -2.3] [-1.5 -2.5]] Weights W: [[ 0.1 0.2 0.3] [ 0.1 0.2 0.3]] bias: [ 0.01 0.1 0.1 ] To compute the net input, we multiply the 4x2 feature matrix X with the 2x3 (n_features x n_classes) weight matrix W , which yields a 4x3 output matrix (n_samples x n_classes) to which we then add the bias unit: \\mathbf{Z} = \\mathbf{X}\\mathbf{W} + \\mathbf{b}.
X = np.array([[0.1, 0.5], [1.1, 2.3], [-1.1, -2.3], [-1.5, -2.5]]) W = np.array([[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]) bias = np.array([0.01, 0.1, 0.1]) print('Inputs X:\\n', X) print('\\nWeights W:\\n', W) print('\\nbias:\\n', bias) Inputs X: [[ 0.1 0.5] [ 1.1 2.3] [-1.1 -2.3] [-1.5 -2.5]] Weights W: [[ 0.1 0.2 0.3] [ 0.1 0.2 0.3]] bias: [ 0.01 0.1 0.1 ] def net_input(X, W, b): return (X.dot(W) + b) net_in = net_input(X, W, bias) print('net input:\\n', net_in) net input: [[ 0.07 0.22 0.28] [ 0.35 0.78 1.12] [-0.33 -0.58 -0.92] [-0.39 -0.7 -1.1 ]] Now, it's time to compute the softmax activation that we discussed earlier: P(y=j \\mid z^{(i)}) = \\phi_{softmax}(z^{(i)}) = \\frac{e^{z^{(i)}}}{\\sum_{j=0}^{k} e^{z_{k}^{(i)}}}. def softmax(z): return (np.exp(z.T) / np.sum(np.exp(z), axis=1)).T smax = softmax(net_in) print('softmax:\\n', smax) softmax: [[ 0.29450637 0.34216758 0.36332605] [ 0.21290077 0.32728332 0.45981591] [ 0.42860913 0.33380113 0.23758974] [ 0.44941979 0.32962558 0.22095463]] As we can see, the values for each sample (row) nicely sum up to 1 now. E.g., we can say that the first sample [ 0.29450637 0.34216758 0.36332605] has a 29.45% probability to belong to class 0. Now, in order to turn these probabilities back into class labels, we could simply take the argmax-index position of each row: [[ 0.29450637 0.34216758 0.36332605 ] -> 2 [ 0.21290077 0.32728332 0.45981591 ] -> 2 [ 0.42860913 0.33380113 0.23758974] -> 0 [ 0.44941979 0.32962558 0.22095463]] -> 0 def to_classlabel(z): return z.argmax(axis=1) print('predicted class labels: ', to_classlabel(smax)) predicted class labels: [2 2 0 0] As we can see, our predictions are terribly wrong, since the correct class labels are [0, 1, 2, 2] . Now, in order to train our logistic model (e.g., via an optimization algorithm such as gradient descent), we need to define a cost function J(\\cdot) that we want to minimize: J(\\mathbf{W}; \\mathbf{b}) = \\frac{1}{n} \\sum_{i=1}^{n} H(T_i, O_i), which is the average of all cross-entropies over our n training samples. The cross-entropy function is defined as H(T_i, O_i) = -\\sum_m T_i \\cdot log(O_i). Here the T stands for \"target\" (i.e., the true class labels) and the O stands for output -- the computed probability via softmax; not the predicted class label. def cross_entropy(output, y_target): return - np.sum(np.log(output) * (y_target), axis=1) xent = cross_entropy(smax, y_enc) print('Cross Entropy:', xent) Cross Entropy: [ 1.22245465 1.11692907 1.43720989 1.50979788] def cost(output, y_target): return np.mean(cross_entropy(output, y_target)) J_cost = cost(smax, y_enc) print('Cost: ', J_cost) Cost: 1.32159787159 In order to learn our softmax model -- determining the weight coefficients -- via gradient descent, we then need to compute the derivative \\nabla \\mathbf{w}_j \\, J(\\mathbf{W}; \\mathbf{b}). 
I don't want to walk through the tedious details here, but this cost derivative turns out to be simply: \\nabla \\mathbf{w}_j \\, J(\\mathbf{W}; \\mathbf{b}) = \\frac{1}{n} \\sum^{n}_{i=0} \\big[\\mathbf{x}^{(i)}\\ \\big(O_i - T_i \\big) \\big] We can then use the cost derivative to update the weights in the opposite direction of the cost gradient with learning rate \\eta : \\mathbf{w}_j := \\mathbf{w}_j - \\eta \\nabla \\mathbf{w}_j \\, J(\\mathbf{W}; \\mathbf{b}) for each class j \\in \\{0, 1, ..., k\\} (note that \\mathbf{w}_j is the weight vector for the class y=j ), and we update the bias units \\mathbf{b}_j := \\mathbf{b}_j - \\eta \\bigg[ \\frac{1}{n} \\sum^{n}_{i=0} \\big(O_i - T_i \\big) \\bigg]. As a penalty against complexity, an approach to reduce the variance of our model and decrease the degree of overfitting by adding additional bias, we can further add a regularization term such as the L2 term with the regularization parameter \\lambda : L2: \\frac{\\lambda}{2} ||\\mathbf{w}||_{2}^{2} , where ||\\mathbf{w}||_{2}^{2} = \\sum^{m}_{l=0} \\sum^{k}_{j=0} w_{l, j}^{2} so that our cost function becomes J(\\mathbf{W}; \\mathbf{b}) = \\frac{1}{n} \\sum_{i=1}^{n} H(T_i, O_i) + \\frac{\\lambda}{2} ||\\mathbf{w}||_{2}^{2} and we define the \"regularized\" weight update as \\mathbf{w}_j := \\mathbf{w}_j - \\eta \\big[\\nabla \\mathbf{w}_j \\, J(\\mathbf{W}) + \\lambda \\mathbf{w}_j \\big]. (Please note that we don't regularize the bias term.) Example 1 - Gradient Descent from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import SoftmaxRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = SoftmaxRegression(eta=0.01, epochs=500, minibatches=1, random_seed=1, print_progress=3) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Softmax Regression - Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 500/500 | Cost 0.06 | Elapsed: 0:00:00 | ETA: 0:00:00 Predicting Class Labels y_pred = lr.predict(X) print('Last 3 Class Labels: %s' % y_pred[-3:]) Last 3 Class Labels: [2 2 2] Predicting Class Probabilities y_pred = lr.predict_proba(X) print('Last 3 Class Labels:\\n %s' % y_pred[-3:]) Last 3 Class Labels: [[ 9.18728149e-09 1.68894679e-02 9.83110523e-01] [ 2.97052325e-11 7.26356627e-04 9.99273643e-01] [ 1.57464093e-06 1.57779528e-01 8.42218897e-01]] Example 2 - Stochastic Gradient Descent from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import SoftmaxRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = SoftmaxRegression(eta=0.01, epochs=300, minibatches=len(y), random_seed=1) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Softmax Regression - Stochastic Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() API SoftmaxRegression(eta=0.01, epochs=50, l2=0.0, minibatches=1, n_classes=None, random_seed=None, print_progress=0) Softmax regression classifier.
Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2 : float Regularization parameter for L2 regularization. No regularization if l2=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats, the average cross_entropy for each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/SoftmaxRegression/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilties : array-like, shape= [n_samples, n_classes] score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Softmax Regression"},{"location":"user_guide/classifier/SoftmaxRegression/#softmax-regression","text":"A logistic regression class for multi-class classification tasks. from mlxtend.classifier import SoftmaxRegression","title":"Softmax Regression"},{"location":"user_guide/classifier/SoftmaxRegression/#overview","text":"Softmax Regression (synonyms: Multinomial Logistic , Maximum Entropy Classifier , or just Multi-class Logistic Regression ) is a generalization of logistic regression that we can use for multi-class classification (under the assumption that the classes are mutually exclusive). In contrast, we use the (standard) Logistic Regression model in binary classification tasks. 
Below is a schematic of a Logistic Regression model, for more details, please see the LogisticRegression manual . In Softmax Regression (SMR), we replace the sigmoid logistic function by the so-called softmax function \\phi_{softmax}(\\cdot) . P(y=j \\mid z^{(i)}) = \\phi_{softmax}(z^{(i)}) = \\frac{e^{z^{(i)}}}{\\sum_{j=0}^{k} e^{z_{k}^{(i)}}}, where we define the net input z as z = w_1x_1 + ... + w_mx_m + b= \\sum_{l=1}^{m} w_l x_l + b= \\mathbf{w}^T\\mathbf{x} + b. ( w is the weight vector, \\mathbf{x} is the feature vector of 1 training sample, and b is the bias unit.) Now, this softmax function computes the probability that this training sample \\mathbf{x}^{(i)} belongs to class j given the weight and net input z^{(i)} . So, we compute the probability p(y = j \\mid \\mathbf{x^{(i)}; w}_j) for each class label in j = 1, \\ldots, k. . Note the normalization term in the denominator which causes these class probabilities to sum up to one. To illustrate the concept of softmax, let us walk through a concrete example. Let's assume we have a training set consisting of 4 samples from 3 different classes (0, 1, and 2) x_0 \\rightarrow \\text{class }0 x_1 \\rightarrow \\text{class }1 x_2 \\rightarrow \\text{class }2 x_3 \\rightarrow \\text{class }2 import numpy as np y = np.array([0, 1, 2, 2]) First, we want to encode the class labels into a format that we can more easily work with; we apply one-hot encoding: y_enc = (np.arange(np.max(y) + 1) == y[:, None]).astype(float) print('one-hot encoding:\\n', y_enc) one-hot encoding: [[ 1. 0. 0.] [ 0. 1. 0.] [ 0. 0. 1.] [ 0. 0. 1.]] A sample that belongs to class 0 (the first row) has a 1 in the first cell, a sample that belongs to class 2 has a 1 in the second cell of its row, and so forth. Next, let us define the feature matrix of our 4 training samples. Here, we assume that our dataset consists of 2 features; thus, we create a 4x2 dimensional matrix of our samples and features. Similarly, we create a 2x3 dimensional weight matrix (one row per feature and one column for each class). X = np.array([[0.1, 0.5], [1.1, 2.3], [-1.1, -2.3], [-1.5, -2.5]]) W = np.array([[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]) bias = np.array([0.01, 0.1, 0.1]) print('Inputs X:\\n', X) print('\\nWeights W:\\n', W) print('\\nbias:\\n', bias) Inputs X: [[ 0.1 0.5] [ 1.1 2.3] [-1.1 -2.3] [-1.5 -2.5]] Weights W: [[ 0.1 0.2 0.3] [ 0.1 0.2 0.3]] bias: [ 0.01 0.1 0.1 ] To compute the net input, we multiply the 4x2 matrix feature matrix X with the 2x3 (n_features x n_classes) weight matrix W , which yields a 4x3 output matrix (n_samples x n_classes) to which we then add the bias unit: \\mathbf{Z} = \\mathbf{X}\\mathbf{W} + \\mathbf{b}. X = np.array([[0.1, 0.5], [1.1, 2.3], [-1.1, -2.3], [-1.5, -2.5]]) W = np.array([[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]) bias = np.array([0.01, 0.1, 0.1]) print('Inputs X:\\n', X) print('\\nWeights W:\\n', W) print('\\nbias:\\n', bias) Inputs X: [[ 0.1 0.5] [ 1.1 2.3] [-1.1 -2.3] [-1.5 -2.5]] Weights W: [[ 0.1 0.2 0.3] [ 0.1 0.2 0.3]] bias: [ 0.01 0.1 0.1 ] def net_input(X, W, b): return (X.dot(W) + b) net_in = net_input(X, W, bias) print('net input:\\n', net_in) net input: [[ 0.07 0.22 0.28] [ 0.35 0.78 1.12] [-0.33 -0.58 -0.92] [-0.39 -0.7 -1.1 ]] Now, it's time to compute the softmax activation that we discussed earlier: P(y=j \\mid z^{(i)}) = \\phi_{softmax}(z^{(i)}) = \\frac{e^{z^{(i)}}}{\\sum_{j=0}^{k} e^{z_{k}^{(i)}}}. 
def softmax(z): return (np.exp(z.T) / np.sum(np.exp(z), axis=1)).T smax = softmax(net_in) print('softmax:\\n', smax) softmax: [[ 0.29450637 0.34216758 0.36332605] [ 0.21290077 0.32728332 0.45981591] [ 0.42860913 0.33380113 0.23758974] [ 0.44941979 0.32962558 0.22095463]] As we can see, the values for each sample (row) nicely sum up to 1 now. E.g., we can say that the first sample [ 0.29450637 0.34216758 0.36332605] has a 29.45% probability to belong to class 0. Now, in order to turn these probabilities back into class labels, we could simply take the argmax-index position of each row: [[ 0.29450637 0.34216758 0.36332605 ] -> 2 [ 0.21290077 0.32728332 0.45981591 ] -> 2 [ 0.42860913 0.33380113 0.23758974] -> 0 [ 0.44941979 0.32962558 0.22095463]] -> 0 def to_classlabel(z): return z.argmax(axis=1) print('predicted class labels: ', to_classlabel(smax)) predicted class labels: [2 2 0 0] As we can see, our predictions are terribly wrong, since the correct class labels are [0, 1, 2, 2] . Now, in order to train our logistic model (e.g., via an optimization algorithm such as gradient descent), we need to define a cost function J(\\cdot) that we want to minimize: J(\\mathbf{W}; \\mathbf{b}) = \\frac{1}{n} \\sum_{i=1}^{n} H(T_i, O_i), which is the average of all cross-entropies over our n training samples. The cross-entropy function is defined as H(T_i, O_i) = -\\sum_m T_i \\cdot log(O_i). Here the T stands for \"target\" (i.e., the true class labels) and the O stands for output -- the computed probability via softmax; not the predicted class label. def cross_entropy(output, y_target): return - np.sum(np.log(output) * (y_target), axis=1) xent = cross_entropy(smax, y_enc) print('Cross Entropy:', xent) Cross Entropy: [ 1.22245465 1.11692907 1.43720989 1.50979788] def cost(output, y_target): return np.mean(cross_entropy(output, y_target)) J_cost = cost(smax, y_enc) print('Cost: ', J_cost) Cost: 1.32159787159 In order to learn our softmax model -- determining the weight coefficients -- via gradient descent, we then need to compute the derivative \\nabla \\mathbf{w}_j \\, J(\\mathbf{W}; \\mathbf{b}). I don't want to walk through the tedious details here, but this cost derivative turns out to be simply: \\nabla \\mathbf{w}_j \\, J(\\mathbf{W}; \\mathbf{b}) = \\frac{1}{n} \\sum^{n}_{i=0} \\big[\\mathbf{x}^{(i)}\\ \\big(O_i - T_i \\big) \\big] We can then use the cost derivate to update the weights in opposite direction of the cost gradient with learning rate \\eta : \\mathbf{w}_j := \\mathbf{w}_j - \\eta \\nabla \\mathbf{w}_j \\, J(\\mathbf{W}; \\mathbf{b}) for each class j \\in \\{0, 1, ..., k\\} (note that \\mathbf{w}_j is the weight vector for the class y=j ), and we update the bias units \\mathbf{b}_j := \\mathbf{b}_j - \\eta \\bigg[ \\frac{1}{n} \\sum^{n}_{i=0} \\big(O_i - T_i \\big) \\bigg]. As a penalty against complexity, an approach to reduce the variance of our model and decrease the degree of overfitting by adding additional bias, we can further add a regularization term such as the L2 term with the regularization parameter \\lambda : L2: \\frac{\\lambda}{2} ||\\mathbf{w}||_{2}^{2} , where ||\\mathbf{w}||_{2}^{2} = \\sum^{m}_{l=0} \\sum^{k}_{j=0} w_{i, j} so that our cost function becomes J(\\mathbf{W}; \\mathbf{b}) = \\frac{1}{n} \\sum_{i=1}^{n} H(T_i, O_i) + \\frac{\\lambda}{2} ||\\mathbf{w}||_{2}^{2} and we define the \"regularized\" weight update as \\mathbf{w}_j := \\mathbf{w}_j - \\eta \\big[\\nabla \\mathbf{w}_j \\, J(\\mathbf{W}) + \\lambda \\mathbf{w}_j \\big]. 
(Please note that we don't regularize the bias term.)","title":"Overview"},{"location":"user_guide/classifier/SoftmaxRegression/#example-1-gradient-descent","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import SoftmaxRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = SoftmaxRegression(eta=0.01, epochs=500, minibatches=1, random_seed=1, print_progress=3) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Softmax Regression - Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 500/500 | Cost 0.06 | Elapsed: 0:00:00 | ETA: 0:00:00","title":"Example 1 - Gradient Descent"},{"location":"user_guide/classifier/SoftmaxRegression/#predicting-class-labels","text":"y_pred = lr.predict(X) print('Last 3 Class Labels: %s' % y_pred[-3:]) Last 3 Class Labels: [2 2 2]","title":"Predicting Class Labels"},{"location":"user_guide/classifier/SoftmaxRegression/#predicting-class-probabilities","text":"y_pred = lr.predict_proba(X) print('Last 3 Class Labels:\\n %s' % y_pred[-3:]) Last 3 Class Labels: [[ 9.18728149e-09 1.68894679e-02 9.83110523e-01] [ 2.97052325e-11 7.26356627e-04 9.99273643e-01] [ 1.57464093e-06 1.57779528e-01 8.42218897e-01]]","title":"Predicting Class Probabilities"},{"location":"user_guide/classifier/SoftmaxRegression/#example-2-stochastic-gradient-descent","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import SoftmaxRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = SoftmaxRegression(eta=0.01, epochs=300, minibatches=len(y), random_seed=1) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Softmax Regression - Stochastic Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show()","title":"Example 2 - Stochastic Gradient Descent"},{"location":"user_guide/classifier/SoftmaxRegression/#api","text":"SoftmaxRegression(eta=0.01, epochs=50, l2=0.0, minibatches=1, n_classes=None, random_seed=None, print_progress=0) Softmax regression classifier. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2 : float Regularization parameter for L2 regularization. No regularization if l2=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 
0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats, the average cross_entropy for each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/SoftmaxRegression/","title":"API"},{"location":"user_guide/classifier/SoftmaxRegression/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilities : array-like, shape= [n_samples, n_classes] score(X, y) Compute the prediction accuracy. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Methods"},{"location":"user_guide/classifier/StackingCVClassifier/","text":"StackingCVClassifier An ensemble-learning meta-classifier for stacking using cross-validation to prepare the inputs for the level-2 classifier to prevent overfitting. from mlxtend.classifier import StackingCVClassifier Overview Stacking is an ensemble learning technique to combine multiple classification models via a meta-classifier. The StackingCVClassifier extends the standard stacking algorithm (implemented as StackingClassifier ) using cross-validation to prepare the input data for the level-2 classifier. In the standard stacking procedure, the first-level classifiers are fit to the same training set that is used to prepare the inputs for the second-level classifier, which may lead to overfitting. The StackingCVClassifier , however, uses the concept of cross-validation: the dataset is split into k folds, and in k successive rounds, k-1 folds are used to fit the first-level classifiers; in each round, the first-level classifiers are then applied to the remaining subset that was not used for model fitting in that round. The resulting predictions are then stacked and provided -- as input data -- to the second-level classifier. After the training of the StackingCVClassifier , the first-level classifiers are fit to the entire dataset as illustrated in the figure below. More formally, the Stacking Cross-Validation algorithm can be summarized as follows (source: [1]): References [1] Tang, J., S. Alelyani, and H. Liu. \" Data Classification: Algorithms and Applications.
\" Data Mining and Knowledge Discovery Series, CRC Press (2015): pp. 498-500. [2] Wolpert, David H. \" Stacked generalization. \" Neural networks 5.2 (1992): 241-259. Example 1 - Simple Stacking CV Classification from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target from sklearn import model_selection from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import StackingCVClassifier import numpy as np RANDOM_SEED = 42 clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=RANDOM_SEED) clf3 = GaussianNB() lr = LogisticRegression() # The StackingCVClassifier uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed need to be specified explicitely for # deterministic behavior np.random.seed(RANDOM_SEED) sclf = StackingCVClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=lr) print('3-fold cross validation:\\n') for clf, label in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier']): scores = model_selection.cross_val_score(clf, X, y, cv=3, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 3-fold cross validation: Accuracy: 0.91 (+/- 0.01) [KNN] Accuracy: 0.90 (+/- 0.03) [Random Forest] Accuracy: 0.92 (+/- 0.03) [Naive Bayes] Accuracy: 0.93 (+/- 0.02) [StackingClassifier] import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions import matplotlib.gridspec as gridspec import itertools gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) for clf, lab, grd in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingCVClassifier'], itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf) plt.title(lab) plt.show() Example 2 - Using Probabilities as Meta-Features Alternatively, the class-probabilities of the first-level classifiers can be used to train the meta-classifier (2nd-level classifier) by setting use_probas=True . For example, in a 3-class setting with 2 level-1 classifiers, these classifiers may make the following \"probability\" predictions for 1 training sample: classifier 1: [0.2, 0.5, 0.3] classifier 2: [0.3, 0.4, 0.4] This results in k features, where k = [n_classes * n_classifiers], by stacking these level-1 probabilities: [0.2, 0.5, 0.3, 0.3, 0.4, 0.4] clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() # The StackingCVClassifier uses scikit-learn's check_cv # internally, which doesn't support a random seed. 
Thus # NumPy's random seed needs to be specified explicitly for # deterministic behavior np.random.seed(RANDOM_SEED) sclf = StackingCVClassifier(classifiers=[clf1, clf2, clf3], use_probas=True, meta_classifier=lr) print('3-fold cross validation:\\n') for clf, label in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier']): scores = model_selection.cross_val_score(clf, X, y, cv=3, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 3-fold cross validation: Accuracy: 0.91 (+/- 0.01) [KNN] Accuracy: 0.91 (+/- 0.06) [Random Forest] Accuracy: 0.92 (+/- 0.03) [Naive Bayes] Accuracy: 0.95 (+/- 0.04) [StackingClassifier] Example 3 - Stacked CV Classification and GridSearch To set up a parameter grid for scikit-learn's GridSearch , we simply provide the estimator's names in the parameter grid -- in the special case of the meta-classifier, we append the 'meta-' prefix. from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV from mlxtend.classifier import StackingCVClassifier # Initializing models clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=RANDOM_SEED) clf3 = GaussianNB() lr = LogisticRegression() # The StackingCVClassifier uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed needs to be specified explicitly for # deterministic behavior np.random.seed(RANDOM_SEED) sclf = StackingCVClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=lr) params = {'kneighborsclassifier__n_neighbors': [1, 5], 'randomforestclassifier__n_estimators': [10, 50], 'meta-logisticregression__C': [0.1, 10.0]} grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, refit=True) grid.fit(X, y) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.673 +/- 0.01 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.920 +/- 0.02 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.893 +/- 0.02 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.947 +/- 0.02 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.947 +/- 0.02 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Best parameters: {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} Accuracy: 0.95
In case we are planning to use the same classifier multiple times, all we need to do is add a number suffix in the parameter grid as shown below: from sklearn.model_selection import GridSearchCV # Initializing models clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=RANDOM_SEED) clf3 = GaussianNB() lr = LogisticRegression() # The StackingCVClassifier uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed needs to be specified explicitly for # deterministic behavior np.random.seed(RANDOM_SEED) sclf = StackingCVClassifier(classifiers=[clf1, clf1, clf2, clf3], meta_classifier=lr) params = {'kneighborsclassifier-1__n_neighbors': [1, 5], 'kneighborsclassifier-2__n_neighbors': [1, 5], 'randomforestclassifier__n_estimators': [10, 50], 'meta-logisticregression__C': [0.1, 10.0]} grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, refit=True) grid.fit(X, y) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.673 +/- 0.01 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.920 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.893 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.947 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.940 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.953 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.927 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5,
'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.940 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.940 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Best parameters: {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} Accuracy: 0.95 Note The StackingCVClassifier also enables grid search over the classifiers argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both different classifiers and classifier parameters at the same time. For instance, while the following parameter dictionary works params = {'randomforestclassifier__n_estimators': [1, 100], 'classifiers': [(clf1, clf1, clf1), (clf2, clf3)]} it will use the instance settings of clf1 , clf2 , and clf3 and will not overwrite them with the 'n_estimators' settings from 'randomforestclassifier__n_estimators': [1, 100] . Example 4 - Stacking of Classifiers that Operate on Different Feature Subsets The different level-1 classifiers can be fit to different subsets of features in the training dataset. The following example illustrates how this can be done on a technical level using scikit-learn pipelines and the ColumnSelector : from sklearn.datasets import load_iris from mlxtend.classifier import StackingCVClassifier from mlxtend.feature_selection import ColumnSelector from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression iris = load_iris() X = iris.data y = iris.target pipe1 = make_pipeline(ColumnSelector(cols=(0, 2)), LogisticRegression()) pipe2 = make_pipeline(ColumnSelector(cols=(1, 2, 3)), LogisticRegression()) sclf = StackingCVClassifier(classifiers=[pipe1, pipe2], meta_classifier=LogisticRegression()) sclf.fit(X, y) StackingCVClassifier(classifiers=[Pipeline(steps=[('columnselector', ColumnSelector(cols=(0, 2))), ('logisticregression', LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solve...='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False))])], cv=2, meta_classifier=LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False), shuffle=True, stratify=True, use_features_in_secondary=False, use_probas=False, verbose=0) API StackingCVClassifier(classifiers, meta_classifier, use_probas=False, cv=2, use_features_in_secondary=False, stratify=True, shuffle=True, verbose=0, store_train_meta_features=False, use_clones=True) A 'Stacking Cross-Validation' classifier for scikit-learn estimators. New in mlxtend v0.4.3 Notes The StackingCVClassifier uses scikit-learn's check_cv internally, which doesn't support a random seed.
Thus NumPy's random seed needs to be specified explicitly for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVClassifier. Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the StackingCVClassifier will fit clones of these original classifiers that will be stored in the class attribute self.clfs_ . meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains meta-classifier based on predicted probabilities instead of class labels. cv : int, cross-validation generator or an iterable, optional (default: 2) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 2-fold cross validation, - integer, to specify the number of folds in a (Stratified)KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use either a KFold or StratifiedKFold cross validation depending on the value of the stratify argument. use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. stratify : bool (default: True) If True, and the cv argument is an integer, it will follow a stratified K-Fold cross validation technique. If the cv argument is a specific cross validation technique, this argument is omitted. shuffle : bool (default: True) If True, and the cv argument is an integer, the training data will be shuffled at the fitting stage prior to cross-validation. If the cv argument is a specific cross validation technique, this argument is omitted. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the regressor being fitted and which fold is currently being used for fitting - verbose=2 : Prints info about the parameters of the regressor being fitted - verbose>2 : Changes verbose param of the underlying regressor to self.verbose - 2 store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier are stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingCVClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features_ : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classifiers.
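For a sense of how the out-of-fold meta-features described in the overview can be constructed, here is a conceptual sketch using scikit-learn's cross_val_predict (an illustration of the idea only, not the actual internals of StackingCVClassifier):

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier

X, y = load_iris(return_X_y=True)
level1 = [KNeighborsClassifier(n_neighbors=1), GaussianNB()]

# Each sample's meta-feature comes from a model that never saw
# that sample during fitting (2-fold out-of-fold predictions)
meta_features = np.column_stack(
    [cross_val_predict(clf, X, y, cv=2) for clf in level1])

# The level-2 (meta-)classifier is fit on these out-of-fold predictions
meta_clf = LogisticRegression().fit(meta_features, y)

# For new data, the level-1 classifiers are refit on the complete dataset first
for clf in level1:
    clf.fit(X, y)

In practice, all of this -- including refitting the first-level classifiers on the entire dataset -- happens inside the classifier's fit method.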
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingCVClassifier/ Methods fit(X, y, groups=None, sample_weight=None) Fit ensemble classifiers and the meta-classifier. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] Predicted class labels. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"StackingCVClassifier"},{"location":"user_guide/classifier/StackingCVClassifier/#stackingcvclassifier","text":"An ensemble-learning meta-classifier for stacking using cross-validation to prepare the inputs for the level-2 classifier to prevent overfitting.
from mlxtend.classifier import StackingCVClassifier","title":"StackingCVClassifier"},{"location":"user_guide/classifier/StackingCVClassifier/#overview","text":"Stacking is an ensemble learning technique to combine multiple classification models via a meta-classifier. The StackingCVClassifier extends the standard stacking algorithm (implemented as StackingClassifier ) using cross-validation to prepare the input data for the level-2 classifier. In the standard stacking procedure, the first-level classifiers are fit to the same training set that is used to prepare the inputs for the second-level classifier, which may lead to overfitting. The StackingCVClassifier , however, uses the concept of cross-validation: the dataset is split into k folds, and in k successive rounds, k-1 folds are used to fit the first-level classifiers; in each round, the first-level classifiers are then applied to the remaining subset that was not used for model fitting in that round. The resulting predictions are then stacked and provided -- as input data -- to the second-level classifier. After the training of the StackingCVClassifier , the first-level classifiers are fit to the entire dataset as illustrated in the figure below. More formally, the Stacking Cross-Validation algorithm can be summarized as follows (source: [1]):","title":"Overview"},{"location":"user_guide/classifier/StackingCVClassifier/#references","text":"[1] Tang, J., S. Alelyani, and H. Liu. \" Data Classification: Algorithms and Applications. \" Data Mining and Knowledge Discovery Series, CRC Press (2015): pp. 498-500. [2] Wolpert, David H. \" Stacked generalization. \" Neural networks 5.2 (1992): 241-259.","title":"References"},{"location":"user_guide/classifier/StackingCVClassifier/#example-1-simple-stacking-cv-classification","text":"from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target from sklearn import model_selection from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import StackingCVClassifier import numpy as np RANDOM_SEED = 42 clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=RANDOM_SEED) clf3 = GaussianNB() lr = LogisticRegression() # The StackingCVClassifier uses scikit-learn's check_cv # internally, which doesn't support a random seed.
Thus # NumPy's random seed needs to be specified explicitly for # deterministic behavior np.random.seed(RANDOM_SEED) sclf = StackingCVClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=lr) print('3-fold cross validation:\\n') for clf, label in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier']): scores = model_selection.cross_val_score(clf, X, y, cv=3, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 3-fold cross validation: Accuracy: 0.91 (+/- 0.01) [KNN] Accuracy: 0.90 (+/- 0.03) [Random Forest] Accuracy: 0.92 (+/- 0.03) [Naive Bayes] Accuracy: 0.93 (+/- 0.02) [StackingClassifier] import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions import matplotlib.gridspec as gridspec import itertools gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) for clf, lab, grd in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingCVClassifier'], itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf) plt.title(lab) plt.show()","title":"Example 1 - Simple Stacking CV Classification"},{"location":"user_guide/classifier/StackingCVClassifier/#example-2-using-probabilities-as-meta-features","text":"Alternatively, the class-probabilities of the first-level classifiers can be used to train the meta-classifier (2nd-level classifier) by setting use_probas=True . For example, in a 3-class setting with 2 level-1 classifiers, these classifiers may make the following \"probability\" predictions for 1 training sample: classifier 1: [0.2, 0.5, 0.3] classifier 2: [0.3, 0.4, 0.4] This results in k features, where k = [n_classes * n_classifiers], by stacking these level-1 probabilities: [0.2, 0.5, 0.3, 0.3, 0.4, 0.4] clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() # The StackingCVClassifier uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed needs to be specified explicitly for # deterministic behavior np.random.seed(RANDOM_SEED) sclf = StackingCVClassifier(classifiers=[clf1, clf2, clf3], use_probas=True, meta_classifier=lr) print('3-fold cross validation:\\n') for clf, label in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier']): scores = model_selection.cross_val_score(clf, X, y, cv=3, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 3-fold cross validation: Accuracy: 0.91 (+/- 0.01) [KNN] Accuracy: 0.91 (+/- 0.06) [Random Forest] Accuracy: 0.92 (+/- 0.03) [Naive Bayes] Accuracy: 0.95 (+/- 0.04) [StackingClassifier]","title":"Example 2 - Using Probabilities as Meta-Features"},{"location":"user_guide/classifier/StackingCVClassifier/#example-3-stacked-cv-classification-and-gridsearch","text":"To set up a parameter grid for scikit-learn's GridSearch , we simply provide the estimator's names in the parameter grid -- in the special case of the meta-classifier, we append the 'meta-' prefix.
from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV from mlxtend.classifier import StackingCVClassifier # Initializing models clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=RANDOM_SEED) clf3 = GaussianNB() lr = LogisticRegression() # The StackingCVClassifier uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed needs to be specified explicitly for # deterministic behavior np.random.seed(RANDOM_SEED) sclf = StackingCVClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=lr) params = {'kneighborsclassifier__n_neighbors': [1, 5], 'randomforestclassifier__n_estimators': [10, 50], 'meta-logisticregression__C': [0.1, 10.0]} grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, refit=True) grid.fit(X, y) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.673 +/- 0.01 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.920 +/- 0.02 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.893 +/- 0.02 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.947 +/- 0.02 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.947 +/- 0.02 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Best parameters: {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} Accuracy: 0.95 In case we are planning to use the same classifier multiple times, all we need to do is add a number suffix in the parameter grid as shown below: from sklearn.model_selection import GridSearchCV # Initializing models clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=RANDOM_SEED) clf3 = GaussianNB() lr = LogisticRegression() # The StackingCVClassifier uses scikit-learn's check_cv # internally, which doesn't support a random seed.
Thus # NumPy's random seed needs to be specified explicitly for # deterministic behavior np.random.seed(RANDOM_SEED) sclf = StackingCVClassifier(classifiers=[clf1, clf1, clf2, clf3], meta_classifier=lr) params = {'kneighborsclassifier-1__n_neighbors': [1, 5], 'kneighborsclassifier-2__n_neighbors': [1, 5], 'randomforestclassifier__n_estimators': [10, 50], 'meta-logisticregression__C': [0.1, 10.0]} grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, refit=True) grid.fit(X, y) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.673 +/- 0.01 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.920 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.893 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.947 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.940 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.953 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.927 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.940 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.940 +/- 0.03
{'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Best parameters: {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} Accuracy: 0.95 Note The StackingCVClassifier also enables grid search over the classifiers argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both different classifiers and classifier parameters at the same time. For instance, while the following parameter dictionary works params = {'randomforestclassifier__n_estimators': [1, 100], 'classifiers': [(clf1, clf1, clf1), (clf2, clf3)]} it will use the instance settings of clf1 , clf2 , and clf3 and will not overwrite them with the 'n_estimators' settings from 'randomforestclassifier__n_estimators': [1, 100] .","title":"Example 3 - Stacked CV Classification and GridSearch"},{"location":"user_guide/classifier/StackingCVClassifier/#example-4-stacking-of-classifiers-that-operate-on-different-feature-subsets","text":"The different level-1 classifiers can be fit to different subsets of features in the training dataset. The following example illustrates how this can be done on a technical level using scikit-learn pipelines and the ColumnSelector : from sklearn.datasets import load_iris from mlxtend.classifier import StackingCVClassifier from mlxtend.feature_selection import ColumnSelector from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression iris = load_iris() X = iris.data y = iris.target pipe1 = make_pipeline(ColumnSelector(cols=(0, 2)), LogisticRegression()) pipe2 = make_pipeline(ColumnSelector(cols=(1, 2, 3)), LogisticRegression()) sclf = StackingCVClassifier(classifiers=[pipe1, pipe2], meta_classifier=LogisticRegression()) sclf.fit(X, y) StackingCVClassifier(classifiers=[Pipeline(steps=[('columnselector', ColumnSelector(cols=(0, 2))), ('logisticregression', LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solve...='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False))])], cv=2, meta_classifier=LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False), shuffle=True, stratify=True, use_features_in_secondary=False, use_probas=False, verbose=0)","title":"Example 4 - Stacking of Classifiers that Operate on Different Feature Subsets"},{"location":"user_guide/classifier/StackingCVClassifier/#api","text":"StackingCVClassifier(classifiers, meta_classifier, use_probas=False, cv=2, use_features_in_secondary=False, stratify=True, shuffle=True, verbose=0, store_train_meta_features=False, use_clones=True) A 'Stacking Cross-Validation' classifier for scikit-learn estimators. New in mlxtend v0.4.3 Notes The StackingCVClassifier uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus NumPy's random seed needs to be specified explicitly for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVClassifier. Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers.
Invoking the fit method on the StackingCVClassifier will fit clones of these original classifiers that will be stored in the class attribute self.clfs_ . meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains meta-classifier based on predicted probabilities instead of class labels. cv : int, cross-validation generator or an iterable, optional (default: 2) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 2-fold cross validation, - integer, to specify the number of folds in a (Stratified)KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use either a KFold or StratifiedKFold cross validation depending on the value of the stratify argument. use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. stratify : bool (default: True) If True, and the cv argument is an integer, it will follow a stratified K-Fold cross validation technique. If the cv argument is a specific cross validation technique, this argument is omitted. shuffle : bool (default: True) If True, and the cv argument is an integer, the training data will be shuffled at the fitting stage prior to cross-validation. If the cv argument is a specific cross validation technique, this argument is omitted. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the regressor being fitted and which fold is currently being used for fitting - verbose=2 : Prints info about the parameters of the regressor being fitted - verbose>2 : Changes verbose param of the underlying regressor to self.verbose - 2 store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier are stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingCVClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features_ : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classifiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingCVClassifier/","title":"API"},{"location":"user_guide/classifier/StackingCVClassifier/#methods","text":"fit(X, y, groups=None, sample_weight=None) Fit ensemble classifiers and the meta-classifier.
Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] Predicted class labels. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"Methods"},{"location":"user_guide/classifier/StackingClassifier/","text":"StackingClassifier An ensemble-learning meta-classifier for stacking. from mlxtend.classifier import StackingClassifier Overview Stacking is an ensemble learning technique to combine multiple classification models via a meta-classifier. The individual classification models are trained based on the complete training set; then, the meta-classifier is fitted based on the outputs -- meta-features -- of the individual classification models in the ensemble. The meta-classifier can either be trained on the predicted class labels or probabilities from the ensemble.
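To make this procedure concrete, here is a minimal sketch of standard stacking with plain scikit-learn estimators (a conceptual illustration only, not mlxtend's implementation; the estimator choices are arbitrary):

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier

X, y = load_iris(return_X_y=True)

# 1) fit the level-1 classifiers on the complete training set
level1 = [KNeighborsClassifier(n_neighbors=1), GaussianNB()]
for clf in level1:
    clf.fit(X, y)

# 2) collect their predicted class labels as meta-features (one column per classifier)
meta_features = np.column_stack([clf.predict(X) for clf in level1])

# 3) fit the meta-classifier on the meta-features
meta_clf = LogisticRegression().fit(meta_features, y)

# predicting new data: build meta-features from the level-1 predictions,
# then apply the meta-classifier to them
print(meta_clf.predict(np.column_stack([clf.predict(X[:3]) for clf in level1])))

Because the level-1 classifiers here are fit to the same data that generates the meta-features, this variant is more prone to overfitting than the cross-validation approach of the StackingCVClassifier described above.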
The algorithm can be summarized as follows (source: [1]): References [1] Tang, J., S. Alelyani, and H. Liu. \" Data Classification: Algorithms and Applications. \" Data Mining and Knowledge Discovery Series, CRC Press (2015): pp. 498-500. [2] Wolpert, David H. \" Stacked generalization. \" Neural networks 5.2 (1992): 241-259. Example 1 - Simple Stacked Classification from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target from sklearn import model_selection from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import StackingClassifier import numpy as np clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() sclf = StackingClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=lr) print('3-fold cross validation:\\n') for clf, label in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier']): scores = model_selection.cross_val_score(clf, X, y, cv=3, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 3-fold cross validation: Accuracy: 0.91 (+/- 0.01) [KNN] Accuracy: 0.91 (+/- 0.06) [Random Forest] Accuracy: 0.92 (+/- 0.03) [Naive Bayes] Accuracy: 0.95 (+/- 0.03) [StackingClassifier] import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions import matplotlib.gridspec as gridspec import itertools gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) for clf, lab, grd in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier'], itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf) plt.title(lab) Example 2 - Using Probabilities as Meta-Features Alternatively, the class-probabilities of the first-level classifiers can be used to train the meta-classifier (2nd-level classifier) by setting use_probas=True . If average_probas=True , the probabilities of the level-1 classifiers are averaged; if average_probas=False , the probabilities are stacked (recommended).
For example, in a 3-class setting with 2 level-1 classifiers, these classifiers may make the following \"probability\" predictions for 1 training sample: classifier 1: [0.2, 0.5, 0.3] classifier 2: [0.3, 0.4, 0.4] If average_probas=True , the meta-features would be: [0.25, 0.45, 0.35] In contrast, using average_probas=False results in k features, where k = [n_classes * n_classifiers], by stacking these level-1 probabilities: [0.2, 0.5, 0.3, 0.3, 0.4, 0.4] clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() sclf = StackingClassifier(classifiers=[clf1, clf2, clf3], use_probas=True, average_probas=False, meta_classifier=lr) print('3-fold cross validation:\\n') for clf, label in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier']): scores = model_selection.cross_val_score(clf, X, y, cv=3, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 3-fold cross validation: Accuracy: 0.91 (+/- 0.01) [KNN] Accuracy: 0.91 (+/- 0.06) [Random Forest] Accuracy: 0.92 (+/- 0.03) [Naive Bayes] Accuracy: 0.94 (+/- 0.03) [StackingClassifier] Example 3 - Stacked Classification and GridSearch To set up a parameter grid for scikit-learn's GridSearch , we simply provide the estimator's names in the parameter grid -- in the special case of the meta-classifier, we append the 'meta-' prefix. from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV from mlxtend.classifier import StackingClassifier # Initializing models clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() sclf = StackingClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=lr) params = {'kneighborsclassifier__n_neighbors': [1, 5], 'randomforestclassifier__n_estimators': [10, 50], 'meta-logisticregression__C': [0.1, 10.0]} grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, refit=True) grid.fit(X, y) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.927 +/- 0.02 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.913 +/- 0.03 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.933 +/- 0.02 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.940 +/- 0.02
{'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Best parameters: {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Accuracy: 0.94 In case we are planning to use the same classifier multiple times, all we need to do is add a number suffix in the parameter grid as shown below: from sklearn.model_selection import GridSearchCV # Initializing models clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() sclf = StackingClassifier(classifiers=[clf1, clf1, clf2, clf3], meta_classifier=lr) params = {'kneighborsclassifier-1__n_neighbors': [1, 5], 'kneighborsclassifier-2__n_neighbors': [1, 5], 'randomforestclassifier__n_estimators': [10, 50], 'meta-logisticregression__C': [0.1, 10.0]} grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, refit=True) grid.fit(X, y) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.907 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.913 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.927 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.913 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.927 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.913 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00
{'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.933 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.940 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Best parameters: {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Accuracy: 0.94 Note The StackingClassifier also enables grid search over the classifiers argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both, differenct classifiers and classifier parameters at the same time. For instance, while the following parameter dictionary works params = {'randomforestclassifier__n_estimators': [1, 100], 'classifiers': [(clf1, clf1, clf1), (clf2, clf3)]} it will use the instance settings of clf1 , clf2 , and clf3 and not overwrite it with the 'n_estimators' settings from 'randomforestclassifier__n_estimators': [1, 100] . Example 4 - Stacking of Classifiers that Operate on Different Feature Subsets The different level-1 classifiers can be fit to different subsets of features in the training dataset. The following example illustrates how this can be done on a technical level using scikit-learn pipelines and the ColumnSelector : from sklearn.datasets import load_iris from mlxtend.classifier import StackingClassifier from mlxtend.feature_selection import ColumnSelector from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression iris = load_iris() X = iris.data y = iris.target pipe1 = make_pipeline(ColumnSelector(cols=(0, 2)), LogisticRegression()) pipe2 = make_pipeline(ColumnSelector(cols=(1, 2, 3)), LogisticRegression()) sclf = StackingClassifier(classifiers=[pipe1, pipe2], meta_classifier=LogisticRegression()) sclf.fit(X, y) StackingClassifier(average_probas=False, classifiers=[Pipeline(steps=[('columnselector', ColumnSelector(cols=(0, 2))), ('logisticregression', LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solve...='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False))])], meta_classifier=LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False), use_features_in_secondary=False, use_probas=False, verbose=0) API StackingClassifier(classifiers, meta_classifier, use_probas=False, average_probas=False, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, use_clones=True) A Stacking classifier for scikit-learn estimators for classification. Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. 
Invoking the fit method on the StackingClassifier will fit clones of these original classifiers that will be stored in the class attribute self.clfs_ . meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains meta-classifier based on predicted probabilities instead of class labels. average_probas : bool (default: False) Averages the probabilities as meta features if True. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the classifier being fitted - verbose=2 : Prints info about the parameters of the classifier being fitted - verbose>2 : Changes verbose param of the underlying classifier to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier are stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classifiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingClassifier/ Methods fit(X, y, sample_weight=None) Fit ensemble classifiers and the meta-classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_outputs] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weight to each classifier in the classifiers list as well as the meta_classifier. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters **fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X.
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] or [n_samples, n_outputs] Predicted class labels. predict_meta_features(X) Get meta-features of test data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] or a list of n_outputs of such arrays if n_outputs > 1. Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"StackingClassifier"},{"location":"user_guide/classifier/StackingClassifier/#stackingclassifier","text":"An ensemble-learning meta-classifier for stacking. from mlxtend.classifier import StackingClassifier","title":"StackingClassifier"},{"location":"user_guide/classifier/StackingClassifier/#overview","text":"Stacking is an ensemble learning technique to combine multiple classification models via a meta-classifier. The individual classification models are trained based on the complete training set; then, the meta-classifier is fitted based on the outputs -- meta-features -- of the individual classification models in the ensemble. The meta-classifier can either be trained on the predicted class labels or probabilities from the ensemble. The algorithm can be summarized as follows (source: [1]):","title":"Overview"},{"location":"user_guide/classifier/StackingClassifier/#references","text":"[1] Tang, J., S. Alelyani, and H. Liu. \" Data Classification: Algorithms and Applications. \" Data Mining and Knowledge Discovery Series, CRC Press (2015): pp. 498-500. [2] Wolpert, David H. \" Stacked generalization.
\" Neural networks 5.2 (1992): 241-259.","title":"References"},{"location":"user_guide/classifier/StackingClassifier/#example-1-simple-stacked-classification","text":"from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target from sklearn import model_selection from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import StackingClassifier import numpy as np clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() sclf = StackingClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=lr) print('3-fold cross validation:\\n') for clf, label in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier']): scores = model_selection.cross_val_score(clf, X, y, cv=3, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 3-fold cross validation: Accuracy: 0.91 (+/- 0.01) [KNN] Accuracy: 0.91 (+/- 0.06) [Random Forest] Accuracy: 0.92 (+/- 0.03) [Naive Bayes] Accuracy: 0.95 (+/- 0.03) [StackingClassifier] import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions import matplotlib.gridspec as gridspec import itertools gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) for clf, lab, grd in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier'], itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf) plt.title(lab)","title":"Example 1 - Simple Stacked Classification"},{"location":"user_guide/classifier/StackingClassifier/#example-2-using-probabilities-as-meta-features","text":"Alternatively, the class-probabilities of the first-level classifiers can be used to train the meta-classifier (2nd-level classifier) by setting use_probas=True . If average_probas=True , the probabilities of the level-1 classifiers are averaged, if average_probas=False , the probabilities are stacked (recommended). 
For example, in a 3-class setting with 2 level-1 classifiers, these classifiers may make the following \"probability\" predictions for 1 training sample: classifier 1: [0.2, 0.5, 0.3] classifier 2: [0.3, 0.4, 0.4] If average_probas=True , the meta-features would be: [0.25, 0.45, 0.35] In contrast, using average_probas=False results in k = n_classes * n_classifiers meta-features, obtained by stacking these level-1 probabilities: [0.2, 0.5, 0.3, 0.3, 0.4, 0.4] clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() sclf = StackingClassifier(classifiers=[clf1, clf2, clf3], use_probas=True, average_probas=False, meta_classifier=lr) print('3-fold cross validation:\\n') for clf, label in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier']): scores = model_selection.cross_val_score(clf, X, y, cv=3, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 3-fold cross validation: Accuracy: 0.91 (+/- 0.01) [KNN] Accuracy: 0.91 (+/- 0.06) [Random Forest] Accuracy: 0.92 (+/- 0.03) [Naive Bayes] Accuracy: 0.94 (+/- 0.03) [StackingClassifier]","title":"Example 2 - Using Probabilities as Meta-Features"},{"location":"user_guide/classifier/StackingClassifier/#example-3-stacked-classification-and-gridsearch","text":"To set up a parameter grid for scikit-learn's GridSearch , we simply provide the estimators' names in the parameter grid -- in the special case of the meta-classifier, we append the 'meta-' prefix. from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV from mlxtend.classifier import StackingClassifier # Initializing models clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() sclf = StackingClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=lr) params = {'kneighborsclassifier__n_neighbors': [1, 5], 'randomforestclassifier__n_estimators': [10, 50], 'meta-logisticregression__C': [0.1, 10.0]} grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, refit=True) grid.fit(X, y) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.927 +/- 0.02 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.913 +/- 0.03 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.933 +/- 0.02
{'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.940 +/- 0.02 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Best parameters: {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Accuracy: 0.94 In case we plan to use the same classification algorithm multiple times, all we need to do is add a number suffix to its name in the parameter grid, as shown below: from sklearn.model_selection import GridSearchCV # Initializing models clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() sclf = StackingClassifier(classifiers=[clf1, clf1, clf2, clf3], meta_classifier=lr) params = {'kneighborsclassifier-1__n_neighbors': [1, 5], 'kneighborsclassifier-2__n_neighbors': [1, 5], 'randomforestclassifier__n_estimators': [10, 50], 'meta-logisticregression__C': [0.1, 10.0]} grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, refit=True) grid.fit(X, y) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.907 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.913 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.927 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.913 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.927 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.913 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 5,
'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.933 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.940 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Best parameters: {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Accuracy: 0.94 Note The StackingClassifier also enables grid search over the classifiers argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both different classifiers and classifier parameters at the same time. For instance, while the following parameter dictionary works params = {'randomforestclassifier__n_estimators': [1, 100], 'classifiers': [(clf1, clf1, clf1), (clf2, clf3)]} it will use the instance settings of clf1 , clf2 , and clf3 and not overwrite them with the 'n_estimators' settings from 'randomforestclassifier__n_estimators': [1, 100] .","title":"Example 3 - Stacked Classification and GridSearch"},{"location":"user_guide/classifier/StackingClassifier/#example-4-stacking-of-classifiers-that-operate-on-different-feature-subsets","text":"The different level-1 classifiers can be fit to different subsets of features in the training dataset.
The following example illustrates how this can be done on a technical level using scikit-learn pipelines and the ColumnSelector : from sklearn.datasets import load_iris from mlxtend.classifier import StackingClassifier from mlxtend.feature_selection import ColumnSelector from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression iris = load_iris() X = iris.data y = iris.target pipe1 = make_pipeline(ColumnSelector(cols=(0, 2)), LogisticRegression()) pipe2 = make_pipeline(ColumnSelector(cols=(1, 2, 3)), LogisticRegression()) sclf = StackingClassifier(classifiers=[pipe1, pipe2], meta_classifier=LogisticRegression()) sclf.fit(X, y) StackingClassifier(average_probas=False, classifiers=[Pipeline(steps=[('columnselector', ColumnSelector(cols=(0, 2))), ('logisticregression', LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solve...='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False))])], meta_classifier=LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False), use_features_in_secondary=False, use_probas=False, verbose=0)","title":"Example 4 - Stacking of Classifiers that Operate on Different Feature Subsets"},{"location":"user_guide/classifier/StackingClassifier/#api","text":"StackingClassifier(classifiers, meta_classifier, use_probas=False, average_probas=False, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, use_clones=True) A Stacking classifier for scikit-learn estimators for classification. Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the StackingClassifier will fit clones of these original classifiers that will be stored in the class attribute self.clfs_ . meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains meta-classifier based on predicted probabilities instead of class labels. average_probas : bool (default: False) Averages the probabilities as meta features if True. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the classifier being fitted - verbose=2 : Prints info about the parameters of the classifier being fitted - verbose>2 : Changes verbose param of the underlying classifier to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier are stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingClassifier's fit method.
Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classifiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingClassifier/","title":"API"},{"location":"user_guide/classifier/StackingClassifier/#methods","text":"fit(X, y, sample_weight=None) Fit ensemble classifiers and the meta-classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_outputs] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weight to each classifier in the classifiers list as well as the meta_classifier. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters **fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] or [n_samples, n_outputs] Predicted class labels. predict_meta_features(X) Get meta-features of test data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] or a list of n_outputs of such arrays if n_outputs > 1. Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"Methods"},{"location":"user_guide/cluster/Kmeans/","text":"Kmeans An implementation of k-means clustering. from mlxtend.cluster import Kmeans Overview Clustering falls into the category of unsupervised learning, a subfield of machine learning where the ground truth labels are not available to us in real-world applications. In clustering, our goal is to group samples by similarity (in k-means: Euclidean distance). The k-means algorithm can be summarized as follows: Randomly pick k centroids from the sample points as initial cluster centers. Assign each sample to the nearest centroid \mu^{(j)}, \; j \in \{1,...,k\} . Move the centroids to the center of the samples that were assigned to them. Repeat steps 2 and 3 until the cluster assignments do not change or a user-defined tolerance or a maximum number of iterations is reached. References MacQueen, J. B. (1967). Some Methods for classification and Analysis of Multivariate Observations . Proceedings of 5th Berkeley Symposium on Mathematical Statistics and Probability. University of California Press. pp. 281\u2013297. MR 0214227. Zbl 0214.46201. Retrieved 2009-04-07. Example 1 - Three Blobs Load some sample data: import matplotlib.pyplot as plt from mlxtend.data import three_blobs_data X, y = three_blobs_data() plt.scatter(X[:, 0], X[:, 1], c='white') plt.show() Compute the cluster centroids: from mlxtend.cluster import Kmeans km = Kmeans(k=3, max_iter=50, random_seed=1, print_progress=3) km.fit(X) print('Iterations until convergence:', km.iterations_) print('Final centroids:\\n', km.centroids_) Iteration: 2/50 | Elapsed: 00:00:00 | ETA: 00:00:00 Iterations until convergence: 2 Final centroids: [[-1.5947298 2.92236966] [ 2.06521743 0.96137409] [ 0.9329651 4.35420713]] Visualize the cluster memberships: y_clust = km.predict(X) plt.scatter(X[y_clust == 0, 0], X[y_clust == 0, 1], s=50, c='lightgreen', marker='s', label='cluster 1') plt.scatter(X[y_clust == 1,0], X[y_clust == 1,1], s=50, c='orange', marker='o', label='cluster 2') plt.scatter(X[y_clust == 2,0], X[y_clust == 2,1], s=50, c='lightblue', marker='v', label='cluster 3') plt.scatter(km.centroids_[:,0], km.centroids_[:,1], s=250, marker='*', c='red', label='centroids') plt.legend(loc='lower left', scatterpoints=1) plt.grid() plt.show() API Kmeans(k, max_iter=10, convergence_tolerance=1e-05, random_seed=None, print_progress=0) K-means clustering class. Added in 0.4.1dev Parameters k : int Number of clusters max_iter : int (default: 10) Number of iterations during cluster assignment. Cluster re-assignment stops automatically when the algorithm converged. convergence_tolerance : float (default: 1e-05) Compares current centroids with centroids of the previous iteration using the given tolerance (a small positive float) to determine if the algorithm converged early. random_seed : int (default: None) Set random state for the initial centroid assignment. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Iterations elapsed 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes centroids_ : 2d-array, shape={k, n_features} Feature values of the k cluster centroids.
clusters_ : dictionary The cluster assignments stored as a Python dictionary; the dictionary keys denote the cluster indices and the items are Python lists of the sample indices that were assigned to each cluster. iterations_ : int Number of iterations until convergence. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/cluster/Kmeans/ Methods fit(X, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values.","title":"Kmeans"},{"location":"user_guide/cluster/Kmeans/#kmeans","text":"An implementation of k-means clustering. from mlxtend.cluster import Kmeans","title":"Kmeans"},{"location":"user_guide/cluster/Kmeans/#overview","text":"Clustering falls into the category of unsupervised learning, a subfield of machine learning where the ground truth labels are not available to us in real-world applications. In clustering, our goal is to group samples by similarity (in k-means: Euclidean distance). The k-means algorithm can be summarized as follows: Randomly pick k centroids from the sample points as initial cluster centers. Assign each sample to the nearest centroid \mu^{(j)}, \; j \in \{1,...,k\} . Move the centroids to the center of the samples that were assigned to them. Repeat steps 2 and 3 until the cluster assignments do not change or a user-defined tolerance or a maximum number of iterations is reached.","title":"Overview"},{"location":"user_guide/cluster/Kmeans/#references","text":"MacQueen, J. B. (1967). Some Methods for classification and Analysis of Multivariate Observations . Proceedings of 5th Berkeley Symposium on Mathematical Statistics and Probability. University of California Press. pp. 281\u2013297. MR 0214227. Zbl 0214.46201.
Retrieved 2009-04-07.","title":"References"},{"location":"user_guide/cluster/Kmeans/#example-1-three-blobs","text":"","title":"Example 1 - Three Blobs"},{"location":"user_guide/cluster/Kmeans/#load-some-sample-data","text":"import matplotlib.pyplot as plt from mlxtend.data import three_blobs_data X, y = three_blobs_data() plt.scatter(X[:, 0], X[:, 1], c='white') plt.show()","title":"Load some sample data:"},{"location":"user_guide/cluster/Kmeans/#compute-the-cluster-centroids","text":"from mlxtend.cluster import Kmeans km = Kmeans(k=3, max_iter=50, random_seed=1, print_progress=3) km.fit(X) print('Iterations until convergence:', km.iterations_) print('Final centroids:\\n', km.centroids_) Iteration: 2/50 | Elapsed: 00:00:00 | ETA: 00:00:00 Iterations until convergence: 2 Final centroids: [[-1.5947298 2.92236966] [ 2.06521743 0.96137409] [ 0.9329651 4.35420713]]","title":"Compute the cluster centroids:"},{"location":"user_guide/cluster/Kmeans/#visualize-the-cluster-memberships","text":"y_clust = km.predict(X) plt.scatter(X[y_clust == 0, 0], X[y_clust == 0, 1], s=50, c='lightgreen', marker='s', label='cluster 1') plt.scatter(X[y_clust == 1,0], X[y_clust == 1,1], s=50, c='orange', marker='o', label='cluster 2') plt.scatter(X[y_clust == 2,0], X[y_clust == 2,1], s=50, c='lightblue', marker='v', label='cluster 3') plt.scatter(km.centroids_[:,0], km.centroids_[:,1], s=250, marker='*', c='red', label='centroids') plt.legend(loc='lower left', scatterpoints=1) plt.grid() plt.show()","title":"Visualize the cluster memberships:"},{"location":"user_guide/cluster/Kmeans/#api","text":"Kmeans(k, max_iter=10, convergence_tolerance=1e-05, random_seed=None, print_progress=0) K-means clustering class. Added in 0.4.1dev Parameters k : int Number of clusters max_iter : int (default: 10) Number of iterations during cluster assignment. Cluster re-assignment stops automatically when the algorithm converged. convergence_tolerance : float (default: 1e-05) Compares current centroids with centroids of the previous iteration using the given tolerance (a small positive float) to determine if the algorithm converged early. random_seed : int (default: None) Set random state for the initial centroid assignment. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Iterations elapsed 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes centroids_ : 2d-array, shape={k, n_features} Feature values of the k cluster centroids. clusters_ : dictionary The cluster assignments stored as a Python dictionary; the dictionary keys denote the cluster indices and the items are Python lists of the sample indices that were assigned to each cluster. iterations_ : int Number of iterations until convergence. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/cluster/Kmeans/","title":"API"},{"location":"user_guide/cluster/Kmeans/#methods","text":"fit(X, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X.
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values.","title":"Methods"},{"location":"user_guide/data/autompg_data/","text":"Auto MPG A function that loads the autompg dataset into NumPy arrays. from mlxtend.data import autompg_data Overview The Auto-MPG dataset for regression analysis. The target ( y ) is defined as the miles per gallon (mpg) for 392 automobiles (6 rows containing \"NaN\"s have been removed). The 8 feature columns are: Features cylinders: multi-valued discrete displacement: continuous horsepower: continuous weight: continuous acceleration: continuous model year: multi-valued discrete origin: multi-valued discrete car name: string (unique for each instance) Number of samples: 392 Target variable (continuous): mpg References Source: https://archive.ics.uci.edu/ml/datasets/Auto+MPG Quinlan, R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann. Example - Dataset overview from mlxtend.data import autompg_data X, y = autompg_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('\\nHeader: %s' % ['cylinders', 'displacement', 'horsepower', 'weight', 'acceleration', 'model year', 'origin', 'car name']) print('1st row', X[0]) Dimensions: 392 x 8 Header: ['cylinders', 'displacement', 'horsepower', 'weight', 'acceleration', 'model year', 'origin', 'car name'] 1st row [ 8.00000000e+00 3.07000000e+02 1.30000000e+02 3.50400000e+03 1.20000000e+01 7.00000000e+01 1.00000000e+00 nan] Note that the feature array contains a str column (\"car name\"), thus it is recommended to pick the features as needed and convert them into a float array for further analysis. The example below shows how to get rid of the car name column and cast the NumPy array as a float array. X[:, :-1].astype(float) array([[ 8. , 307. , 130. , ..., 12. , 70. , 1. ], [ 8. , 350. , 165. , ..., 11.5, 70. , 1. ], [ 8. , 318. , 150. , ..., 11. , 70. , 1. ], ..., [ 4. , 135. , 84. , ..., 11.6, 82. , 1. ], [ 4. , 120. , 79. , ..., 18.6, 82. , 1. ], [ 4. , 119. , 82. , ..., 19.4, 82. , 1. ]]) API autompg_data() Auto MPG dataset. Source : https://archive.ics.uci.edu/ml/datasets/Auto+MPG Number of samples : 392 Continuous target variable : mpg Dataset Attributes: 1) cylinders: multi-valued discrete 2) displacement: continuous 3) horsepower: continuous 4) weight: continuous 5) acceleration: continuous 6) model year: multi-valued discrete 7) origin: multi-valued discrete 8) car name: string (unique for each instance) Returns X, y : [n_samples, n_features], [n_targets] X is the feature matrix with 392 auto samples as rows and 8 feature columns (6 rows with NaNs removed). y is a 1-dimensional array of the target MPG values. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/autompg_data/","title":"Auto MPG"},{"location":"user_guide/data/autompg_data/#auto-mpg","text":"A function that loads the autompg dataset into NumPy arrays. from mlxtend.data import autompg_data","title":"Auto MPG"},{"location":"user_guide/data/autompg_data/#overview","text":"The Auto-MPG dataset for regression analysis. The target ( y ) is defined as the miles per gallon (mpg) for 392 automobiles (6 rows containing \"NaN\"s have been removed).
The 8 feature columns are: Features cylinders: multi-valued discrete displacement: continuous horsepower: continuous weight: continuous acceleration: continuous model year: multi-valued discrete origin: multi-valued discrete car name: string (unique for each instance) Number of samples: 392 Target variable (continuous): mpg","title":"Overview"},{"location":"user_guide/data/autompg_data/#references","text":"Source: https://archive.ics.uci.edu/ml/datasets/Auto+MPG Quinlan, R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann.","title":"References"},{"location":"user_guide/data/autompg_data/#example-dataset-overview","text":"from mlxtend.data import autompg_data X, y = autompg_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('\\nHeader: %s' % ['cylinders', 'displacement', 'horsepower', 'weight', 'acceleration', 'model year', 'origin', 'car name']) print('1st row', X[0]) Dimensions: 392 x 8 Header: ['cylinders', 'displacement', 'horsepower', 'weight', 'acceleration', 'model year', 'origin', 'car name'] 1st row [ 8.00000000e+00 3.07000000e+02 1.30000000e+02 3.50400000e+03 1.20000000e+01 7.00000000e+01 1.00000000e+00 nan] Note that the feature array contains a str column (\"car name\"), thus it is recommended to pick the features as needed and convert them into a float array for further analysis. The example below shows how to get rid of the car name column and cast the NumPy array as a float array. X[:, :-1].astype(float) array([[ 8. , 307. , 130. , ..., 12. , 70. , 1. ], [ 8. , 350. , 165. , ..., 11.5, 70. , 1. ], [ 8. , 318. , 150. , ..., 11. , 70. , 1. ], ..., [ 4. , 135. , 84. , ..., 11.6, 82. , 1. ], [ 4. , 120. , 79. , ..., 18.6, 82. , 1. ], [ 4. , 119. , 82. , ..., 19.4, 82. , 1. ]])","title":"Example - Dataset overview"},{"location":"user_guide/data/autompg_data/#api","text":"autompg_data() Auto MPG dataset. Source : https://archive.ics.uci.edu/ml/datasets/Auto+MPG Number of samples : 392 Continuous target variable : mpg Dataset Attributes: 1) cylinders: multi-valued discrete 2) displacement: continuous 3) horsepower: continuous 4) weight: continuous 5) acceleration: continuous 6) model year: multi-valued discrete 7) origin: multi-valued discrete 8) car name: string (unique for each instance) Returns X, y : [n_samples, n_features], [n_targets] X is the feature matrix with 392 auto samples as rows and 8 feature columns (6 rows with NaNs removed). y is a 1-dimensional array of the target MPG values. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/autompg_data/","title":"API"},{"location":"user_guide/data/boston_housing_data/","text":"Boston Housing Data A function that loads the boston_housing_data dataset into NumPy arrays. from mlxtend.data import boston_housing_data Overview The Boston Housing dataset for regression analysis. Features CRIM: per capita crime rate by town ZN: proportion of residential land zoned for lots over 25,000 sq.ft.
INDUS: proportion of non-retail business acres per town CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) NOX: nitric oxides concentration (parts per 10 million) RM: average number of rooms per dwelling AGE: proportion of owner-occupied units built prior to 1940 DIS: weighted distances to five Boston employment centres RAD: index of accessibility to radial highways TAX: full-value property-tax rate per $10,000 PTRATIO: pupil-teacher ratio by town B: 1000(Bk - 0.63)^2 where Bk is the proportion of b. by town LSTAT: % lower status of the population Number of samples: 506 Target variable (continuous): MEDV, Median value of owner-occupied homes in $1000's References Source: https://archive.ics.uci.edu/ml/datasets/Housing Harrison, D. and Rubinfeld, D.L. 'Hedonic prices and the demand for clean air', J. Environ. Economics & Management, vol.5, 81-102, 1978. Example 1 - Dataset overview from mlxtend.data import boston_housing_data X, y = boston_housing_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('1st row', X[0]) (506, 14) Dimensions: 506 x 13 1st row [ 6.32000000e-03 1.80000000e+01 2.31000000e+00 0.00000000e+00 5.38000000e-01 6.57500000e+00 6.52000000e+01 4.09000000e+00 1.00000000e+00 2.96000000e+02 1.53000000e+01 3.96900000e+02 4.98000000e+00] API boston_housing_data() Boston Housing dataset. Source : https://archive.ics.uci.edu/ml/datasets/Housing Number of samples : 506 Continuous target variable : MEDV MEDV = Median value of owner-occupied homes in $1000's Dataset Attributes: 1) CRIM per capita crime rate by town 2) ZN proportion of residential land zoned for lots over 25,000 sq.ft. 3) INDUS proportion of non-retail business acres per town 4) CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) 5) NOX nitric oxides concentration (parts per 10 million) 6) RM average number of rooms per dwelling 7) AGE proportion of owner-occupied units built prior to 1940 8) DIS weighted distances to five Boston employment centres 9) RAD index of accessibility to radial highways 10) TAX full-value property-tax rate per $10,000 11) PTRATIO pupil-teacher ratio by town 12) B 1000(Bk - 0.63)^2 where Bk is the prop. of b. by town 13) LSTAT % lower status of the population Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 506 housing samples as rows and 13 feature columns. y is a 1-dimensional array of the continuous target variable MEDV Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/boston_housing_data/","title":"Boston Housing Data"},{"location":"user_guide/data/boston_housing_data/#boston-housing-data","text":"A function that loads the boston_housing_data dataset into NumPy arrays. from mlxtend.data import boston_housing_data","title":"Boston Housing Data"},{"location":"user_guide/data/boston_housing_data/#overview","text":"The Boston Housing dataset for regression analysis. Features CRIM: per capita crime rate by town ZN: proportion of residential land zoned for lots over 25,000 sq.ft.
INDUS: proportion of non-retail business acres per town CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) NOX: nitric oxides concentration (parts per 10 million) RM: average number of rooms per dwelling AGE: proportion of owner-occupied units built prior to 1940 DIS: weighted distances to five Boston employment centres RAD: index of accessibility to radial highways TAX: full-value property-tax rate per $10,000 PTRATIO: pupil-teacher ratio by town B: 1000(Bk - 0.63)^2 where Bk is the proportion of b. by town LSTAT: % lower status of the population Number of samples: 506 Target variable (continuous): MEDV, Median value of owner-occupied homes in $1000's","title":"Overview"},{"location":"user_guide/data/boston_housing_data/#references","text":"Source: https://archive.ics.uci.edu/ml/datasets/Housing Harrison, D. and Rubinfeld, D.L. 'Hedonic prices and the demand for clean air', J. Environ. Economics & Management, vol.5, 81-102, 1978.","title":"References"},{"location":"user_guide/data/boston_housing_data/#example-1-dataset-overview","text":"from mlxtend.data import boston_housing_data X, y = boston_housing_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('1st row', X[0]) (506, 14) Dimensions: 506 x 13 1st row [ 6.32000000e-03 1.80000000e+01 2.31000000e+00 0.00000000e+00 5.38000000e-01 6.57500000e+00 6.52000000e+01 4.09000000e+00 1.00000000e+00 2.96000000e+02 1.53000000e+01 3.96900000e+02 4.98000000e+00]","title":"Example 1 - Dataset overview"},{"location":"user_guide/data/boston_housing_data/#api","text":"boston_housing_data() Boston Housing dataset. Source : https://archive.ics.uci.edu/ml/datasets/Housing Number of samples : 506 Continuous target variable : MEDV MEDV = Median value of owner-occupied homes in $1000's Dataset Attributes: 1) CRIM per capita crime rate by town 2) ZN proportion of residential land zoned for lots over 25,000 sq.ft. 3) INDUS proportion of non-retail business acres per town 4) CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) 5) NOX nitric oxides concentration (parts per 10 million) 6) RM average number of rooms per dwelling 7) AGE proportion of owner-occupied units built prior to 1940 8) DIS weighted distances to five Boston employment centres 9) RAD index of accessibility to radial highways 10) TAX full-value property-tax rate per $10,000 11) PTRATIO pupil-teacher ratio by town 12) B 1000(Bk - 0.63)^2 where Bk is the prop. of b. by town 13) LSTAT % lower status of the population Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 506 housing samples as rows and 13 feature columns. y is a 1-dimensional array of the continuous target variable MEDV Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/boston_housing_data/","title":"API"},{"location":"user_guide/data/iris_data/","text":"Iris Dataset A function that loads the iris dataset into NumPy arrays. from mlxtend.data import iris_data Overview The Iris dataset for classification. Features Sepal length Sepal width Petal length Petal width Number of samples: 150 Target variable (discrete): {50x Setosa, 50x Versicolor, 50x Virginica} References Source: https://archive.ics.uci.edu/ml/datasets/Iris Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository. Irvine, CA: University of California, School of Information and Computer Science.
Example 1 - Dataset overview from mlxtend.data import iris_data X, y = iris_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('\\nHeader: %s' % ['sepal length', 'sepal width', 'petal length', 'petal width']) print('1st row', X[0]) Dimensions: 150 x 4 Header: ['sepal length', 'sepal width', 'petal length', 'petal width'] 1st row [ 5.1 3.5 1.4 0.2] import numpy as np print('Classes: Setosa, Versicolor, Virginica') print(np.unique(y)) print('Class distribution: %s' % np.bincount(y)) Classes: Setosa, Versicolor, Virginica [0 1 2] Class distribution: [50 50 50] API iris_data() Iris flower dataset. Source : https://archive.ics.uci.edu/ml/datasets/Iris Number of samples : 150 Class labels : {0, 1, 2}, distribution: [50, 50, 50] 0 = setosa, 1 = versicolor, 2 = virginica. Dataset Attributes: 1) sepal length [cm] 2) sepal width [cm] 3) petal length [cm] 4) petal width [cm] Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 150 flower samples as rows, and 4 feature columns sepal length, sepal width, petal length, and petal width. y is a 1-dimensional array of the class labels {0, 1, 2} Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/iris_data/","title":"Iris Dataset"},{"location":"user_guide/data/iris_data/#iris-dataset","text":"A function that loads the iris dataset into NumPy arrays. from mlxtend.data import iris_data","title":"Iris Dataset"},{"location":"user_guide/data/iris_data/#overview","text":"The Iris dataset for classification. Features Sepal length Sepal width Petal length Petal width Number of samples: 150 Target variable (discrete): {50x Setosa, 50x Versicolor, 50x Virginica}","title":"Overview"},{"location":"user_guide/data/iris_data/#references","text":"Source: https://archive.ics.uci.edu/ml/datasets/Iris Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository. Irvine, CA: University of California, School of Information and Computer Science.","title":"References"},{"location":"user_guide/data/iris_data/#example-1-dataset-overview","text":"from mlxtend.data import iris_data X, y = iris_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('\\nHeader: %s' % ['sepal length', 'sepal width', 'petal length', 'petal width']) print('1st row', X[0]) Dimensions: 150 x 4 Header: ['sepal length', 'sepal width', 'petal length', 'petal width'] 1st row [ 5.1 3.5 1.4 0.2] import numpy as np print('Classes: Setosa, Versicolor, Virginica') print(np.unique(y)) print('Class distribution: %s' % np.bincount(y)) Classes: Setosa, Versicolor, Virginica [0 1 2] Class distribution: [50 50 50]","title":"Example 1 - Dataset overview"},{"location":"user_guide/data/iris_data/#api","text":"iris_data() Iris flower dataset. Source : https://archive.ics.uci.edu/ml/datasets/Iris Number of samples : 150 Class labels : {0, 1, 2}, distribution: [50, 50, 50] 0 = setosa, 1 = versicolor, 2 = virginica. Dataset Attributes: 1) sepal length [cm] 2) sepal width [cm] 3) petal length [cm] 4) petal width [cm] Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 150 flower samples as rows, and 4 feature columns sepal length, sepal width, petal length, and petal width. 
y is a 1-dimensional array of the class labels {0, 1, 2} Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/iris_data/","title":"API"},{"location":"user_guide/data/loadlocal_mnist/","text":"Load the MNIST Dataset from Local Files A utility function that loads the MNIST dataset from byte-form into NumPy arrays. from mlxtend.data import loadlocal_mnist Overview The MNIST dataset was constructed from two datasets of the US National Institute of Standards and Technology (NIST). The training set consists of handwritten digits from 250 different people, 50 percent high school students, and 50 percent employees from the Census Bureau. Note that the test set contains handwritten digits from different people following the same split. The MNIST dataset is publicly available at http://yann.lecun.com/exdb/mnist/ and consists of the following four parts: - Training set images: train-images-idx3-ubyte.gz (9.9 MB, 47 MB unzipped, and 60,000 samples) - Training set labels: train-labels-idx1-ubyte.gz (29 KB, 60 KB unzipped, and 60,000 labels) - Test set images: t10k-images-idx3-ubyte.gz (1.6 MB, 7.8 MB unzipped, and 10,000 samples) - Test set labels: t10k-labels-idx1-ubyte.gz (5 KB, 10 KB unzipped, and 10,000 labels) Features Each feature vector (row in the feature matrix) consists of 784 pixels (intensities) -- unrolled from the original 28x28 pixel images. Number of samples: 60,000 training images and 10,000 test images Target variable (discrete): digit class labels {0, 1, ..., 9} References Source: http://yann.lecun.com/exdb/mnist/ Y. LeCun and C. Cortes. Mnist handwritten digit database. AT&T Labs [Online]. Available: http://yann.lecun.com/exdb/mnist, 2010. Example 1 Part 1 - Downloading the MNIST dataset 1) Download the MNIST files from Y. LeCun's website http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz for example, via curl -O http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz 2) Unzip the downloaded gzip archives for example, via gunzip t*-ubyte.gz Example 1 Part 2 - Loading MNIST into NumPy Arrays from mlxtend.data import loadlocal_mnist X, y = loadlocal_mnist( images_path='/Users/Sebastian/Desktop/train-images-idx3-ubyte', labels_path='/Users/Sebastian/Desktop/train-labels-idx1-ubyte') print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('\\n1st row', X[0]) Dimensions: 60000 x 784 1st row [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 18 18 18 126 136 175 26 166 255 247 127 0 0 0 0 0 0 0 0 0 0 0 0 30 36 94 154 170 253 253 253 253 253 225 172 253 242 195 64 0 0 0 0 0 0 0 0 0 0 0 49 238 253 253 253 253 253 253 253 253 251 93 82 82 56 39 0 0 0 0 0 0 0 0 0 0 0 0 18 219 253 253 253 253 253 198 182 247 241 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 80 156 107 253 253 205 11 0 43 154 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 1 154 253 90 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 139 253 190 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 190 253 70 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 35 241 225 160 108 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 81 240 253 253 119 25 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 45 186 253 253 150 27 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 16 93 252 253 187 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 249 253 249 64 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 46 130 183 253 253 207 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 39 148 229 253 253 253 250 182 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 24 114 221 253 253 253 253 201 78 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 23 66 213 253 253 253 253 198 81 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 18 171 219 253 253 253 253 195 80 9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 55 172 226 253 253 253 253 244 133 11 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 136 253 253 253 212 135 132 16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] import numpy as np print('Digits: 0 1 2 3 4 5 6 7 8 9') print('labels: %s' % np.unique(y)) print('Class distribution: %s' % np.bincount(y)) Digits: 0 1 2 3 4 5 6 7 8 9 labels: [0 1 2 3 4 5 6 7 8 9] Class distribution: [5923 6742 5958 6131 5842 5421 5918 6265 5851 5949] Store as CSV Files np.savetxt(fname='/Users/Sebastian/Desktop/images.csv', X=X, delimiter=',', fmt='%d') np.savetxt(fname='/Users/Sebastian/Desktop/labels.csv', X=y, delimiter=',', fmt='%d') API loadlocal_mnist(images_path, labels_path) Read MNIST from ubyte files. Parameters images_path : str path to the test or train MNIST ubyte file labels_path : str path to the test or train MNIST class labels file Returns images : [n_samples, n_pixels] numpy.array Pixel values of the images. labels : [n_samples] numpy array Target class labels Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/loadlocal_mnist/","title":"Load the MNIST Dataset from Local Files"},{"location":"user_guide/data/loadlocal_mnist/#load-the-mnist-dataset-from-local-files","text":"A utility function that loads the MNIST dataset from byte-form into NumPy arrays. from mlxtend.data import loadlocal_mnist","title":"Load the MNIST Dataset from Local Files"},{"location":"user_guide/data/loadlocal_mnist/#overview","text":"The MNIST dataset was constructed from two datasets of the US National Institute of Standards and Technology (NIST). The training set consists of handwritten digits from 250 different people, 50 percent high school students, and 50 percent employees from the Census Bureau. Note that the test set contains handwritten digits from different people following the same split. The MNIST dataset is publicly available at http://yann.lecun.com/exdb/mnist/ and consists of the following four parts: - Training set images: train-images-idx3-ubyte.gz (9.9 MB, 47 MB unzipped, and 60,000 samples) - Training set labels: train-labels-idx1-ubyte.gz (29 KB, 60 KB unzipped, and 60,000 labels) - Test set images: t10k-images-idx3-ubyte.gz (1.6 MB, 7.8 MB unzipped, and 10,000 samples) - Test set labels: t10k-labels-idx1-ubyte.gz (5 KB, 10 KB unzipped, and 10,000 labels) Features Each feature vector (row in the feature matrix) consists of 784 pixels (intensities) -- unrolled from the original 28x28 pixel images. Number of samples: 60,000 training images and 10,000 test images Target variable (discrete): digit class labels {0, 1, ..., 9}","title":"Overview"},{"location":"user_guide/data/loadlocal_mnist/#references","text":"Source: http://yann.lecun.com/exdb/mnist/ Y. LeCun and C. Cortes. Mnist handwritten digit database. AT&T Labs [Online]. Available: http://yann.lecun.
","title":"References"},{"location":"user_guide/data/loadlocal_mnist/#example-1-part-1-downloading-the-mnist-dataset","text":"1) Download the MNIST files from Y. LeCun's website http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz for example, via curl -O http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz 2) Unzip the downloaded gzip archives for example, via gunzip t*-ubyte.gz","title":"Example 1 Part 1 - Downloading the MNIST dataset"},{"location":"user_guide/data/loadlocal_mnist/#example-1-part-2-loading-mnist-into-numpy-arrays","text":"from mlxtend.data import loadlocal_mnist X, y = loadlocal_mnist( images_path='/Users/Sebastian/Desktop/train-images-idx3-ubyte', labels_path='/Users/Sebastian/Desktop/train-labels-idx1-ubyte') print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('\\n1st row', X[0]) Dimensions: 60000 x 784 1st row [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 18 18 18 126 136 175 26 166 255 247 127 0 0 0 0 0 0 0 0 0 0 0 0 30 36 94 154 170 253 253 253 253 253 225 172 253 242 195 64 0 0 0 0 0 0 0 0 0 0 0 49 238 253 253 253 253 253 253 253 253 251 93 82 82 56 39 0 0 0 0 0 0 0 0 0 0 0 0 18 219 253 253 253 253 253 198 182 247 241 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 80 156 107 253 253 205 11 0 43 154 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 1 154 253 90 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 139 253 190 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 190 253 70 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 35 241 225 160 108 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 81 240 253 253 119 25 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 45 186 253 253 150 27 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 16 93 252 253 187 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 249 253 249 64 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 46 130 183 253 253 207 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 39 148 229 253 253 253 250 182 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 24 114 221 253 253 253 253 201 78 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 23 66 213 253 253 253 253 198 81 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 18 171 219 253 253 253 253 195 80 9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 55 172 226 253 253 253 253 244 133 11 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 136 253 253 253 212 135 132 16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] import numpy as np print('Digits: 0 1 2 3 4 5 6 7 8 9') print('labels: %s' % np.unique(y)) print('Class distribution: %s' % np.bincount(y)) Digits: 0 1 2 3 4 5 6 7 8 9 labels: [0 1 2 3 4 5 6 7 8 9] Class distribution: [5923 6742 5958 6131 5842 5421 5918 6265 5851 5949]","title":"Example 1 Part 2 - Loading MNIST into NumPy Arrays"},{"location":"user_guide/data/loadlocal_mnist/#store-as-csv-files","text":"np.savetxt(fname='/Users/Sebastian/Desktop/images.csv', X=X, delimiter=',', fmt='%d') np.savetxt(fname='/Users/Sebastian/Desktop/labels.csv', X=y, delimiter=',', fmt='%d')","title":"Store as CSV Files"},{"location":"user_guide/data/loadlocal_mnist/#api","text":"loadlocal_mnist(images_path, labels_path) Read MNIST from ubyte files. Parameters images_path : str path to the test or train MNIST ubyte file labels_path : str path to the test or train MNIST class labels file Returns images : [n_samples, n_pixels] numpy.array Pixel values of the images. labels : [n_samples] numpy array Target class labels Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/loadlocal_mnist/","title":"API"},
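For background, the ubyte files parsed by loadlocal_mnist use the simple idx layout: a big-endian header of 32-bit unsigned integers (a magic number followed by the dimension sizes), then the raw unsigned bytes. The following standalone reader is a minimal sketch of that layout under the standard idx3/idx1 conventions; the helper names read_idx_images and read_idx_labels are illustrative and not part of mlxtend, which performs the equivalent parsing internally:

import struct
import numpy as np

def read_idx_images(path):
    # idx3 header: magic number, n_images, n_rows, n_cols (big-endian uint32)
    with open(path, 'rb') as f:
        magic, n, rows, cols = struct.unpack('>IIII', f.read(16))
        # the remaining bytes are the pixel intensities, one uint8 per pixel
        return np.frombuffer(f.read(), dtype=np.uint8).reshape(n, rows * cols)

def read_idx_labels(path):
    # idx1 header: magic number, n_labels (big-endian uint32)
    with open(path, 'rb') as f:
        magic, n = struct.unpack('>II', f.read(8))
        return np.frombuffer(f.read(), dtype=np.uint8)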
Files"},{"location":"user_guide/data/loadlocal_mnist/#api","text":"loadlocal_mnist(images_path, labels_path) Read MNIST from ubyte files. Parameters images_path : str path to the test or train MNIST ubyte file labels_path : str path to the test or train MNIST class labels file Returns images : [n_samples, n_pixels] numpy.array Pixel values of the images. labels : [n_samples] numpy array Target class labels Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/loadlocal_mnist/","title":"API"},{"location":"user_guide/data/make_multiplexer_dataset/","text":"Make Multiplexer Dataset Function that creates a dataset generated by a n-bit Boolean multiplexer for evaluating supervised learning algorithms. from mlxtend.data import make_multiplexer_dataset Overview The make_multiplexer_dataset function creates a dataset generated by an n-bit Boolean multiplexer. Such dataset represents a dataset generated by a simple rule, based on the behavior of a electric multiplexer, yet presents a relatively challenging classification problem for supervised learning algorithm with interactions between features (epistasis) as it may be encountered in many real-world scenarios [1]. The following illustration depicts a 6-bit multiplexer that consists of 2 address bits and 4 register bits. The address bits converted to decimal representation point to a position in the register bit. For example, if the address bits are \"00\" (0 in decimal), the address bits point to the register bit at position 0. The value of the register position pointed to determines the class label. For example, if the register bit at position is 0, the class label is 0. Vice versa, if the register bit at position 0 is 1, the class label is 1. In the example above, the address bits \"10\" (2 in decimal) point to the 3rd register position (as we start counting from index 0), which has a bit value of 1. Hence, the class label is 1. Below are a few more examples: Address bits: [0, 1], register bits: [1, 0, 1, 1], class label: 0 Address bits: [0, 1], register bits: [1, 1, 1, 0], class label: 1 Address bits: [1, 0], register bits: [1, 0, 0, 1], class label: 0 Address bits: [1, 1], register bits: [1, 1, 1, 0], class label: 0 Address bits: [0, 1], register bits: [0, 1, 1, 0], class label: 1 Address bits: [0, 1], register bits: [1, 0, 0, 1], class label: 0 Address bits: [0, 1], register bits: [0, 1, 1, 1], class label: 1 Address bits: [0, 1], register bits: [0, 0, 0, 0], class label: 0 Address bits: [1, 0], register bits: [1, 0, 1, 1], class label: 1 Address bits: [0, 1], register bits: [1, 1, 1, 1], class label: 1 Note that in the implementation of the multiplexer function, if the number of address bits is set to 2, this results in a 6 bit multiplexer as two bit can have 2^2=4 different register positions (2 bit + 4 bit = 6 bit). However, if we choose 3 address bits instead, 2^3=8 positions would be covered, resulting in a 11 bit (3 bit + 8 bit = 11 bit) multiplexer, and so forth. References [1] Urbanowicz, R. J., & Browne, W. N. (2017). Introduction to Learning Classifier Systems . Springer. 
Example 1 -- 6-bit multiplexer This simple example illustrates how to create a dataset from a 6-bit multiplexer import numpy as np from mlxtend.data import make_multiplexer_dataset X, y = make_multiplexer_dataset(address_bits=2, sample_size=10, positive_class_ratio=0.5, shuffle=False, random_seed=123) print('Features:\\n', X) print('\\nClass labels:\\n', y) Features: [[0 1 0 1 0 1] [1 0 0 0 1 1] [0 1 1 1 0 0] [0 1 1 1 0 0] [0 0 1 1 0 0] [0 1 0 0 0 0] [0 1 1 0 1 1] [1 0 1 0 0 0] [1 0 0 1 0 1] [1 0 1 0 0 1]] Class labels: [1 1 1 1 1 0 0 0 0 0] API make_multiplexer_dataset(address_bits=2, sample_size=100, positive_class_ratio=0.5, shuffle=False, random_seed=None) Function to create a binary n-bit multiplexer dataset. New in mlxtend v0.9 Parameters address_bits : int (default: 2) A positive integer that determines the number of address bits in the multiplexer, which in turn determines the n-bit capacity of the multiplexer and therefore the number of features. For example, 2 address bits will result in a 6-bit multiplexer and consequently 6 features (2 + 2^2 = 6). If address_bits=3 , then this results in an 11-bit multiplexer (3 + 2^3 = 11) with 11 features. sample_size : int (default: 100) The total number of samples generated. positive_class_ratio : float (default: 0.5) The fraction (a float between 0 and 1) of samples in the generated dataset (of size sample_size ) that have class label 1. If positive_class_ratio=0.5 (default), then the ratio of class 0 and class 1 samples is perfectly balanced. shuffle : Bool (default: False) Whether or not to shuffle the features and labels. If False (default), the samples are returned in sorted order starting with sample_size /2 samples with class label 0 and followed by sample_size /2 samples with class label 1. random_seed : int (default: None) Random seed used for generating the multiplexer samples and shuffling. Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with the number of samples equal to sample_size . The number of features is determined by the number of address bits. For instance, 2 address bits will result in a 6-bit multiplexer and consequently 6 features (2 + 2^2 = 6). All features are binary (values in {0, 1}). y is a 1-dimensional array of class labels in {0, 1}. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/make_multiplexer_dataset","title":"Make Multiplexer Dataset"},{"location":"user_guide/data/make_multiplexer_dataset/#make-multiplexer-dataset","text":"Function that creates a dataset generated by an n-bit Boolean multiplexer for evaluating supervised learning algorithms. from mlxtend.data import make_multiplexer_dataset","title":"Make Multiplexer Dataset"},{"location":"user_guide/data/make_multiplexer_dataset/#overview","text":"The make_multiplexer_dataset function creates a dataset generated by an n-bit Boolean multiplexer. Such a dataset is generated by a simple rule, based on the behavior of an electric multiplexer, yet it presents a relatively challenging classification problem for supervised learning algorithms due to interactions between features (epistasis), as they may be encountered in many real-world scenarios [1]. The following illustration depicts a 6-bit multiplexer that consists of 2 address bits and 4 register bits. The address bits, converted to decimal representation, point to a position among the register bits.
For example, if the address bits are \"00\" (0 in decimal), the address bits point to the register bit at position 0. The value of the register position pointed to determines the class label. For example, if the register bit at position is 0, the class label is 0. Vice versa, if the register bit at position 0 is 1, the class label is 1. In the example above, the address bits \"10\" (2 in decimal) point to the 3rd register position (as we start counting from index 0), which has a bit value of 1. Hence, the class label is 1. Below are a few more examples: Address bits: [0, 1], register bits: [1, 0, 1, 1], class label: 0 Address bits: [0, 1], register bits: [1, 1, 1, 0], class label: 1 Address bits: [1, 0], register bits: [1, 0, 0, 1], class label: 0 Address bits: [1, 1], register bits: [1, 1, 1, 0], class label: 0 Address bits: [0, 1], register bits: [0, 1, 1, 0], class label: 1 Address bits: [0, 1], register bits: [1, 0, 0, 1], class label: 0 Address bits: [0, 1], register bits: [0, 1, 1, 1], class label: 1 Address bits: [0, 1], register bits: [0, 0, 0, 0], class label: 0 Address bits: [1, 0], register bits: [1, 0, 1, 1], class label: 1 Address bits: [0, 1], register bits: [1, 1, 1, 1], class label: 1 Note that in the implementation of the multiplexer function, if the number of address bits is set to 2, this results in a 6 bit multiplexer as two bit can have 2^2=4 different register positions (2 bit + 4 bit = 6 bit). However, if we choose 3 address bits instead, 2^3=8 positions would be covered, resulting in a 11 bit (3 bit + 8 bit = 11 bit) multiplexer, and so forth.","title":"Overview"},{"location":"user_guide/data/make_multiplexer_dataset/#references","text":"[1] Urbanowicz, R. J., & Browne, W. N. (2017). Introduction to Learning Classifier Systems . Springer.","title":"References"},{"location":"user_guide/data/make_multiplexer_dataset/#example-1-6-bit-multiplexer","text":"This simple example illustrates how to create dataset from a 6-bit multiplexer import numpy as np from mlxtend.data import make_multiplexer_dataset X, y = make_multiplexer_dataset(address_bits=2, sample_size=10, positive_class_ratio=0.5, shuffle=False, random_seed=123) print('Features:\\n', X) print('\\nClass labels:\\n', y) Features: [[0 1 0 1 0 1] [1 0 0 0 1 1] [0 1 1 1 0 0] [0 1 1 1 0 0] [0 0 1 1 0 0] [0 1 0 0 0 0] [0 1 1 0 1 1] [1 0 1 0 0 0] [1 0 0 1 0 1] [1 0 1 0 0 1]] Class labels: [1 1 1 1 1 0 0 0 0 0]","title":"Example 1 -- 6-bit multiplexer"},{"location":"user_guide/data/make_multiplexer_dataset/#api","text":"make_multiplexer_dataset(address_bits=2, sample_size=100, positive_class_ratio=0.5, shuffle=False, random_seed=None) Function to create a binary n-bit multiplexer dataset. New in mlxtend v0.9 Parameters address_bits : int (default: 2) A positive integer that determines the number of address bits in the multiplexer, which in turn determine the n-bit capacity of the multiplexer and therefore the number of features. The number of features is determined by the number of address bits. For example, 2 address bits will result in a 6 bit multiplexer and consequently 6 features (2 + 2^2 = 6). If address_bits=3 , then this results in an 11-bit multiplexer as (2 + 2^3 = 11) with 11 features. sample_size : int (default: 100) The total number of samples generated. positive_class_ratio : float (default: 0.5) The fraction (a float between 0 and 1) of samples in the sample_size d dataset that have class label 1. If positive_class_ratio=0.5 (default), then the ratio of class 0 and class 1 samples is perfectly balanced. 
shuffle : Bool (default: False) Whether or not to shuffle the features and labels. If False (default), the samples are returned in sorted order starting with sample_size /2 samples with class label 0 and followed by sample_size /2 samples with class label 1. random_seed : int (default: None) Random seed used for generating the multiplexer samples and shuffling. Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with the number of samples equal to sample_size . The number of features is determined by the number of address bits. For instance, 2 address bits will result in a 6 bit multiplexer and consequently 6 features (2 + 2^2 = 6). All features are binary (values in {0, 1}). y is a 1-dimensional array of class labels in {0, 1}. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/make_multiplexer_dataset","title":"API"},{"location":"user_guide/data/mnist_data/","text":"MNIST Dataset A function that loads the MNIST dataset into NumPy arrays. from mlxtend.data import mnist_data Overview The MNIST dataset was constructed from two datasets of the US National Institute of Standards and Technology (NIST). The training set consists of handwritten digits from 250 different people, 50 percent high school students, and 50 percent employees from the Census Bureau. Note that the test set contains handwritten digits from different people following the same split. Features Each feature vector (row in the feature matrix) consists of 784 pixels (intensities) -- unrolled from the original 28x28 pixels images. Number of samples: A subset of 5000 images (the first 500 digits of each class) Target variable (discrete): {500x 0, ..., 500x 9} References Source: http://yann.lecun.com/exdb/mnist/ Y. LeCun and C. Cortes. Mnist handwritten digit database. AT&T Labs [Online]. Available: http://yann.lecun.com/exdb/mnist , 2010. Example 1 - Dataset overview from mlxtend.data import mnist_data X, y = mnist_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('1st row', X[0]) Dimensions: 5000 x 784 1st row [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 51. 159. 253. 159. 50. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 48. 238. 252. 252. 252. 237. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 54. 227. 253. 252. 239. 233. 252. 57. 6. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 10. 60. 224. 252. 253. 252. 202. 84. 252. 253. 122. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 163. 252. 252. 252. 253. 252. 252. 96. 189. 253. 167. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 51. 238. 253. 253. 190. 114. 253. 228. 47. 79. 255. 168. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 48. 238. 252. 252. 179. 12. 75. 121. 21. 0. 0. 253. 243. 50. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 38. 165. 253. 233. 208. 84. 0. 0. 0. 0. 0. 0. 253. 252. 165. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 7. 178. 252. 240. 71. 19. 28. 0. 0. 0. 0. 0. 0. 253. 252. 195. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 57. 252. 252. 63. 0. 0. 0. 0. 0. 0. 0. 0. 0. 253. 252. 195. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 198. 253. 190. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 255. 253. 196. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 76. 246. 252. 
112. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 253. 252. 148. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 230. 25. 0. 0. 0. 0. 0. 0. 0. 0. 7. 135. 253. 186. 12. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 223. 0. 0. 0. 0. 0. 0. 0. 0. 7. 131. 252. 225. 71. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 145. 0. 0. 0. 0. 0. 0. 0. 48. 165. 252. 173. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 86. 253. 225. 0. 0. 0. 0. 0. 0. 114. 238. 253. 162. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 249. 146. 48. 29. 85. 178. 225. 253. 223. 167. 56. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 252. 252. 229. 215. 252. 252. 252. 196. 130. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 28. 199. 252. 252. 253. 252. 252. 233. 145. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 25. 128. 252. 253. 252. 141. 37. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] import numpy as np print('Class labels: %s' % np.unique(y)) print('Class distribution: %s' % np.bincount(y)) Class labels: [0 1 2 3 4 5 6 7 8 9] Class distribution: [500 500 500 500 500 500 500 500 500 500] Example 2 - Visualize MNIST %matplotlib inline import matplotlib.pyplot as plt def plot_digit(X, y, idx): img = X[idx].reshape(28,28) plt.imshow(img, cmap='Greys', interpolation='nearest') plt.title('true label: %d' % y[idx]) plt.show() plot_digit(X, y, 4) API mnist_data() 5000 samples from the MNIST handwritten digits dataset. Data Source : http://yann.lecun.com/exdb/mnist/ Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 5000 image samples as rows, each row consists of 28x28 pixels that were unrolled into 784 pixel feature vectors. y contains the 10 unique class labels 0-9. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/mnist_data/","title":"MNIST Dataset"},{"location":"user_guide/data/mnist_data/#mnist-dataset","text":"A function that loads the MNIST dataset into NumPy arrays. from mlxtend.data import mnist_data","title":"MNIST Dataset"},{"location":"user_guide/data/mnist_data/#overview","text":"The MNIST dataset was constructed from two datasets of the US National Institute of Standards and Technology (NIST). The training set consists of handwritten digits from 250 different people, 50 percent high school students, and 50 percent employees from the Census Bureau. Note that the test set contains handwritten digits from different people following the same split. Features Each feature vector (row in the feature matrix) consists of 784 pixels (intensities) -- unrolled from the original 28x28 pixels images. Number of samples: A subset of 5000 images (the first 500 digits of each class) Target variable (discrete): {500x 0, ..., 500x 9}","title":"Overview"},{"location":"user_guide/data/mnist_data/#references","text":"Source: http://yann.lecun.com/exdb/mnist/ Y. LeCun and C. Cortes. MNIST handwritten digit database. AT&T Labs [Online].
Available: http://yann.lecun.com/exdb/mnist , 2010.","title":"References"},{"location":"user_guide/data/mnist_data/#example-1-dataset-overview","text":"from mlxtend.data import mnist_data X, y = mnist_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('1st row', X[0]) Dimensions: 5000 x 784 1st row [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 51. 159. 253. 159. 50. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 48. 238. 252. 252. 252. 237. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 54. 227. 253. 252. 239. 233. 252. 57. 6. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 10. 60. 224. 252. 253. 252. 202. 84. 252. 253. 122. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 163. 252. 252. 252. 253. 252. 252. 96. 189. 253. 167. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 51. 238. 253. 253. 190. 114. 253. 228. 47. 79. 255. 168. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 48. 238. 252. 252. 179. 12. 75. 121. 21. 0. 0. 253. 243. 50. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 38. 165. 253. 233. 208. 84. 0. 0. 0. 0. 0. 0. 253. 252. 165. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 7. 178. 252. 240. 71. 19. 28. 0. 0. 0. 0. 0. 0. 253. 252. 195. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 57. 252. 252. 63. 0. 0. 0. 0. 0. 0. 0. 0. 0. 253. 252. 195. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 198. 253. 190. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 255. 253. 196. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 76. 246. 252. 112. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 253. 252. 148. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 230. 25. 0. 0. 0. 0. 0. 0. 0. 0. 7. 135. 253. 186. 12. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 223. 0. 0. 0. 0. 0. 0. 0. 0. 7. 131. 252. 225. 71. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 145. 0. 0. 0. 0. 0. 0. 0. 48. 165. 252. 173. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 86. 253. 225. 0. 0. 0. 0. 0. 0. 114. 238. 253. 162. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 249. 146. 48. 29. 85. 178. 225. 253. 223. 167. 56. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 252. 252. 229. 215. 252. 252. 252. 196. 130. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 28. 199. 252. 252. 253. 252. 252. 233. 145. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 25. 128. 252. 253. 252. 141. 37. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] 
import numpy as np print('Class labels: %s' % np.unique(y)) print('Class distribution: %s' % np.bincount(y)) Class labels: [0 1 2 3 4 5 6 7 8 9] Class distribution: [500 500 500 500 500 500 500 500 500 500]","title":"Example 1 - Dataset overview"},{"location":"user_guide/data/mnist_data/#example-2-visualize-mnist","text":"%matplotlib inline import matplotlib.pyplot as plt def plot_digit(X, y, idx): img = X[idx].reshape(28,28) plt.imshow(img, cmap='Greys', interpolation='nearest') plt.title('true label: %d' % y[idx]) plt.show() plot_digit(X, y, 4)","title":"Example 2 - Visualize MNIST"},{"location":"user_guide/data/mnist_data/#api","text":"mnist_data() 5000 samples from the MNIST handwritten digits dataset. Data Source : http://yann.lecun.com/exdb/mnist/ Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 5000 image samples as rows, each row consists of 28x28 pixels that were unrolled into 784 pixel feature vectors. y contains the 10 unique class labels 0-9. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/mnist_data/","title":"API"},{"location":"user_guide/data/three_blobs_data/","text":"Three Blobs Dataset A function that loads the three_blobs dataset into NumPy arrays. from mlxtend.data import three_blobs_data Overview A random dataset of 3 2D blobs for clustering. Number of samples : 150 Suggested labels \in {0, 1, 2}, distribution: [50, 50, 50] References Example 1 - Dataset overview from mlxtend.data import three_blobs_data X, y = three_blobs_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('1st row', X[0]) Dimensions: 150 x 2 1st row [ 2.60509732 1.22529553] import numpy as np print('Suggested cluster labels') print(np.unique(y)) print('Label distribution: %s' % np.bincount(y)) Suggested cluster labels [0 1 2] Label distribution: [50 50 50] import matplotlib.pyplot as plt plt.scatter(X[:,0], X[:,1], c='white', marker='o', s=50) plt.grid() plt.show() plt.scatter(X[y == 0, 0], X[y == 0, 1], s=50, c='lightgreen', marker='s', label='cluster 1') plt.scatter(X[y == 1,0], X[y == 1,1], s=50, c='orange', marker='o', label='cluster 2') plt.scatter(X[y == 2,0], X[y == 2,1], s=50, c='lightblue', marker='v', label='cluster 3') plt.legend(loc='lower left') plt.grid() plt.show() API three_blobs_data() A random dataset of 3 2D blobs for clustering. Number of samples : 150 Suggested labels : {0, 1, 2}, distribution: [50, 50, 50] Returns X, y : [n_samples, n_features], [n_cluster_labels] X is the feature matrix with 150 samples as rows and 2 feature columns. y is a 1-dimensional array of the 3 suggested cluster labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/three_blobs_data","title":"Three Blobs Dataset"},{"location":"user_guide/data/three_blobs_data/#three-blobs-dataset","text":"A function that loads the three_blobs dataset into NumPy arrays. from mlxtend.data import three_blobs_data","title":"Three Blobs Dataset"},{"location":"user_guide/data/three_blobs_data/#overview","text":"A random dataset of 3 2D blobs for clustering.
Number of samples : 150 Suggested labels \in {0, 1, 2}, distribution: [50, 50, 50]","title":"Overview"},{"location":"user_guide/data/three_blobs_data/#references","text":"","title":"References"},{"location":"user_guide/data/three_blobs_data/#example-1-dataset-overview","text":"from mlxtend.data import three_blobs_data X, y = three_blobs_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('1st row', X[0]) Dimensions: 150 x 2 1st row [ 2.60509732 1.22529553] import numpy as np print('Suggested cluster labels') print(np.unique(y)) print('Label distribution: %s' % np.bincount(y)) Suggested cluster labels [0 1 2] Label distribution: [50 50 50] import matplotlib.pyplot as plt plt.scatter(X[:,0], X[:,1], c='white', marker='o', s=50) plt.grid() plt.show() plt.scatter(X[y == 0, 0], X[y == 0, 1], s=50, c='lightgreen', marker='s', label='cluster 1') plt.scatter(X[y == 1,0], X[y == 1,1], s=50, c='orange', marker='o', label='cluster 2') plt.scatter(X[y == 2,0], X[y == 2,1], s=50, c='lightblue', marker='v', label='cluster 3') plt.legend(loc='lower left') plt.grid() plt.show()","title":"Example 1 - Dataset overview"},{"location":"user_guide/data/three_blobs_data/#api","text":"three_blobs_data() A random dataset of 3 2D blobs for clustering. Number of samples : 150 Suggested labels : {0, 1, 2}, distribution: [50, 50, 50] Returns X, y : [n_samples, n_features], [n_cluster_labels] X is the feature matrix with 150 samples as rows and 2 feature columns. y is a 1-dimensional array of the 3 suggested cluster labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/three_blobs_data","title":"API"},{"location":"user_guide/data/wine_data/","text":"Wine Dataset A function that loads the Wine dataset into NumPy arrays. from mlxtend.data import wine_data Overview The Wine dataset for classification. Samples 178 Features 13 Classes 3 Data Set Characteristics: Multivariate Attribute Characteristics: Integer, Real Associated Tasks: Classification Missing Values None column attribute 1) Class Label 2) Alcohol 3) Malic acid 4) Ash 5) Alcalinity of ash 6) Magnesium 7) Total phenols 8) Flavanoids 9) Nonflavanoid phenols 10) Proanthocyanins 11) Color intensity 12) Hue 13) OD280/OD315 of diluted wines 14) Proline class samples 0 59 1 71 2 48 References Forina, M. et al, PARVUS - An Extendible Package for Data Exploration, Classification and Correlation. Institute of Pharmaceutical and Food Analysis and Technologies, Via Brigata Salerno, 16147 Genoa, Italy. Source: https://archive.ics.uci.edu/ml/datasets/Wine Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository. Irvine, CA: University of California, School of Information and Computer Science.
Example 1 - Dataset overview from mlxtend.data import wine_data X, y = wine_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('\\nHeader: %s' % ['alcohol', 'malic acid', 'ash', 'ash alcalinity', 'magnesium', 'total phenols', 'flavanoids', 'nonflavanoid phenols', 'proanthocyanins', 'color intensity', 'hue', 'OD280/OD315 of diluted wines', 'proline']) print('1st row', X[0]) Dimensions: 178 x 13 Header: ['alcohol', 'malic acid', 'ash', 'ash alcalinity', 'magnesium', 'total phenols', 'flavanoids', 'nonflavanoid phenols', 'proanthocyanins', 'color intensity', 'hue', 'OD280/OD315 of diluted wines', 'proline'] 1st row [ 1.42300000e+01 1.71000000e+00 2.43000000e+00 1.56000000e+01 1.27000000e+02 2.80000000e+00 3.06000000e+00 2.80000000e-01 2.29000000e+00 5.64000000e+00 1.04000000e+00 3.92000000e+00 1.06500000e+03] import numpy as np print('Classes: %s' % np.unique(y)) print('Class distribution: %s' % np.bincount(y)) Classes: [0 1 2] Class distribution: [59 71 48] API wine_data() Wine dataset. Source : https://archive.ics.uci.edu/ml/datasets/Wine Number of samples : 178 Class labels : {0, 1, 2}, distribution: [59, 71, 48] Dataset Attributes: 1) Alcohol 2) Malic acid 3) Ash 4) Alcalinity of ash 5) Magnesium 6) Total phenols 7) Flavanoids 8) Nonflavanoid phenols 9) Proanthocyanins 10) Color intensity 11) Hue 12) OD280/OD315 of diluted wines 13) Proline Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 178 wine samples as rows and 13 feature columns. y is a 1-dimensional array of the 3 class labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/wine_data","title":"Wine Dataset"},{"location":"user_guide/data/wine_data/#wine-dataset","text":"A function that loads the Wine dataset into NumPy arrays. from mlxtend.data import wine_data","title":"Wine Dataset"},{"location":"user_guide/data/wine_data/#overview","text":"The Wine dataset for classification. Samples 178 Features 13 Classes 3 Data Set Characteristics: Multivariate Attribute Characteristics: Integer, Real Associated Tasks: Classification Missing Values None column attribute 1) Class Label 2) Alcohol 3) Malic acid 4) Ash 5) Alcalinity of ash 6) Magnesium 7) Total phenols 8) Flavanoids 9) Nonflavanoid phenols 10) Proanthocyanins 11) Color intensity 12) Hue 13) OD280/OD315 of diluted wines 14) Proline class samples 0 59 1 71 2 48","title":"Overview"},{"location":"user_guide/data/wine_data/#references","text":"Forina, M. et al, PARVUS - An Extendible Package for Data Exploration, Classification and Correlation. Institute of Pharmaceutical and Food Analysis and Technologies, Via Brigata Salerno, 16147 Genoa, Italy. Source: https://archive.ics.uci.edu/ml/datasets/Wine Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository. 
Irvine, CA: University of California, School of Information and Computer Science.","title":"References"},{"location":"user_guide/data/wine_data/#example-1-dataset-overview","text":"from mlxtend.data import wine_data X, y = wine_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('\\nHeader: %s' % ['alcohol', 'malic acid', 'ash', 'ash alcalinity', 'magnesium', 'total phenols', 'flavanoids', 'nonflavanoid phenols', 'proanthocyanins', 'color intensity', 'hue', 'OD280/OD315 of diluted wines', 'proline']) print('1st row', X[0]) Dimensions: 178 x 13 Header: ['alcohol', 'malic acid', 'ash', 'ash alcalinity', 'magnesium', 'total phenols', 'flavanoids', 'nonflavanoid phenols', 'proanthocyanins', 'color intensity', 'hue', 'OD280/OD315 of diluted wines', 'proline'] 1st row [ 1.42300000e+01 1.71000000e+00 2.43000000e+00 1.56000000e+01 1.27000000e+02 2.80000000e+00 3.06000000e+00 2.80000000e-01 2.29000000e+00 5.64000000e+00 1.04000000e+00 3.92000000e+00 1.06500000e+03] import numpy as np print('Classes: %s' % np.unique(y)) print('Class distribution: %s' % np.bincount(y)) Classes: [0 1 2] Class distribution: [59 71 48]","title":"Example 1 - Dataset overview"},{"location":"user_guide/data/wine_data/#api","text":"wine_data() Wine dataset. Source : https://archive.ics.uci.edu/ml/datasets/Wine Number of samples : 178 Class labels : {0, 1, 2}, distribution: [59, 71, 48] Dataset Attributes: 1) Alcohol 2) Malic acid 3) Ash 4) Alcalinity of ash 5) Magnesium 6) Total phenols 7) Flavanoids 8) Nonflavanoid phenols 9) Proanthocyanins 10) Color intensity 11) Hue 12) OD280/OD315 of diluted wines 13) Proline Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 178 wine samples as rows and 13 feature columns. y is a 1-dimensional array of the 3 class labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/wine_data","title":"API"},{"location":"user_guide/evaluate/BootstrapOutOfBag/","text":"BootstrapOutOfBag An implementation of the out-of-bag bootstrap to evaluate supervised learning algorithms. from mlxtend.evaluate import BootstrapOutOfBag Overview Originally, the bootstrap method was developed to determine the statistical properties of an estimator when the underlying distribution is unknown and additional samples are not available. Now, in order to exploit this method for the evaluation of predictive models, such as hypotheses for classification and regression, we may prefer a slightly different approach to bootstrapping using the so-called Out-Of-Bag (OOB) or Leave-One-Out Bootstrap (LOOB) technique. Here, we use out-of-bag samples as test sets for evaluation instead of evaluating the model on the training data. Out-of-bag samples are the unique sets of instances that are not used for model fitting, as shown in the figure below [1]. The figure above illustrates what three random bootstrap samples drawn from an exemplary ten-sample dataset ( X_1,X_2, ..., X_{10} ) and their corresponding out-of-bag samples for testing may look like. In practice, Bradley Efron and Robert Tibshirani recommend drawing 50 to 200 bootstrap samples as being sufficient for reliable estimates [2]. References [1] https://sebastianraschka.com/blog/2016/model-evaluation-selection-part2.html [2] Efron, Bradley, and Robert J. Tibshirani. An introduction to the bootstrap. CRC press, 1994.
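To make the out-of-bag idea concrete before the examples below: for a single bootstrap sample, the out-of-bag indices are simply the indices that were never drawn. A minimal sketch in plain NumPy, independent of the BootstrapOutOfBag implementation:

import numpy as np

rng = np.random.RandomState(123)
n_samples = 10
# draw one bootstrap sample of indices with replacement
boot_idx = rng.choice(np.arange(n_samples), size=n_samples, replace=True)
# the out-of-bag (test) indices are those that were never sampled
oob_idx = np.setdiff1d(np.arange(n_samples), boot_idx)
print('bootstrap sample:', boot_idx)
print('out-of-bag sample:', oob_idx)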
Example 1 -- Evaluating the predictive performance of a model The BootstrapOutOfBag class mimics the behavior of scikit-learn's cross-validation classes, e.g., KFold : from mlxtend.evaluate import BootstrapOutOfBag import numpy as np oob = BootstrapOutOfBag(n_splits=3) for train, test in oob.split(np.array([1, 2, 3, 4, 5])): print(train, test) [4 2 1 3 3] [0] [2 4 1 2 1] [0 3] [4 3 3 4 1] [0 2] Consequently, we can use BootstrapOutOfBag objects via the cross_val_score method: from sklearn.datasets import load_iris from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score iris = load_iris() X = iris.data y = iris.target lr = LogisticRegression() print(cross_val_score(lr, X, y)) [ 0.96078431 0.92156863 0.95833333] print(cross_val_score(lr, X, y, cv=BootstrapOutOfBag(n_splits=3, random_seed=456))) [ 0.92727273 0.96226415 0.94444444] In practice, it is recommended to run at least 200 iterations, though: print('Mean accuracy: %.1f%%' % np.mean(100*cross_val_score( lr, X, y, cv=BootstrapOutOfBag(n_splits=200, random_seed=456)))) Mean accuracy: 94.8% Using the bootstrap, we can apply the percentile method to compute the confidence bounds of the performance estimate. We pick our lower and upper confidence bounds as follows: ACC_{lower} = \alpha_1 th percentile of the ACC_{boot} distribution ACC_{upper} = \alpha_2 th percentile of the ACC_{boot} distribution where \alpha_1 = \alpha and \alpha_2 = 1-\alpha , so that we obtain the 100 \times (1-2 \times \alpha) % confidence interval for a chosen degree of confidence. For instance, to compute a 95% confidence interval, we pick \alpha=0.025 to obtain the 2.5th and 97.5th percentiles of the distribution of the b bootstrap replicates as the lower and upper confidence bounds. import matplotlib.pyplot as plt %matplotlib inline accuracies = cross_val_score(lr, X, y, cv=BootstrapOutOfBag(n_splits=1000, random_seed=456)) mean = np.mean(accuracies) lower = np.percentile(accuracies, 2.5) upper = np.percentile(accuracies, 97.5) fig, ax = plt.subplots(figsize=(8, 4)) ax.vlines(mean, [0], 40, lw=2.5, linestyle='-', label='mean') ax.vlines(lower, [0], 15, lw=2.5, linestyle='-.', label='CI95 percentile') ax.vlines(upper, [0], 15, lw=2.5, linestyle='-.') ax.hist(accuracies, bins=11, color='#0080ff', edgecolor=\"none\", alpha=0.3) plt.legend(loc='upper left') plt.show() API BootstrapOutOfBag(n_splits=200, random_seed=None) Parameters n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. Returns train_idx : ndarray The training set indices for that split. test_idx : ndarray The testing set indices for that split. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/BootstrapOutOfBag/ Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility with scikit-learn. y : object Always ignored, exists for compatibility with scikit-learn. groups : object Always ignored, exists for compatibility with scikit-learn. Returns n_splits : int Returns the number of splitting iterations in the cross-validator. split(X, y=None, groups=None) y : array-like or None (default: None) Argument is not used and only included as a parameter for compatibility, similar to KFold in scikit-learn.
groups : array-like or None (default: None) Argument is not used and only included as a parameter for compatibility, similar to KFold in scikit-learn.","title":"BootstrapOutOfBag"},{"location":"user_guide/evaluate/BootstrapOutOfBag/#bootstrapoutofbag","text":"An implementation of the out-of-bag bootstrap to evaluate supervised learning algorithms. from mlxtend.evaluate import BootstrapOutOfBag","title":"BootstrapOutOfBag"},{"location":"user_guide/evaluate/BootstrapOutOfBag/#overview","text":"Originally, the bootstrap method was developed to determine the statistical properties of an estimator when the underlying distribution is unknown and additional samples are not available. Now, in order to exploit this method for the evaluation of predictive models, such as hypotheses for classification and regression, we may prefer a slightly different approach to bootstrapping using the so-called Out-Of-Bag (OOB) or Leave-One-Out Bootstrap (LOOB) technique. Here, we use out-of-bag samples as test sets for evaluation instead of evaluating the model on the training data. Out-of-bag samples are the unique sets of instances that are not used for model fitting, as shown in the figure below [1]. The figure above illustrates what three random bootstrap samples drawn from an exemplary ten-sample dataset ( X_1,X_2, ..., X_{10} ) and their corresponding out-of-bag samples for testing may look like. In practice, Bradley Efron and Robert Tibshirani recommend drawing 50 to 200 bootstrap samples as being sufficient for reliable estimates [2].","title":"Overview"},{"location":"user_guide/evaluate/BootstrapOutOfBag/#references","text":"[1] https://sebastianraschka.com/blog/2016/model-evaluation-selection-part2.html [2] Efron, Bradley, and Robert J. Tibshirani. An introduction to the bootstrap. CRC press, 1994.","title":"References"},{"location":"user_guide/evaluate/BootstrapOutOfBag/#example-1-evaluating-the-predictive-performance-of-a-model","text":"The BootstrapOutOfBag class mimics the behavior of scikit-learn's cross-validation classes, e.g., KFold : from mlxtend.evaluate import BootstrapOutOfBag import numpy as np oob = BootstrapOutOfBag(n_splits=3) for train, test in oob.split(np.array([1, 2, 3, 4, 5])): print(train, test) [4 2 1 3 3] [0] [2 4 1 2 1] [0 3] [4 3 3 4 1] [0 2] Consequently, we can use BootstrapOutOfBag objects via the cross_val_score method: from sklearn.datasets import load_iris from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score iris = load_iris() X = iris.data y = iris.target lr = LogisticRegression() print(cross_val_score(lr, X, y)) [ 0.96078431 0.92156863 0.95833333] print(cross_val_score(lr, X, y, cv=BootstrapOutOfBag(n_splits=3, random_seed=456))) [ 0.92727273 0.96226415 0.94444444] In practice, it is recommended to run at least 200 iterations, though: print('Mean accuracy: %.1f%%' % np.mean(100*cross_val_score( lr, X, y, cv=BootstrapOutOfBag(n_splits=200, random_seed=456)))) Mean accuracy: 94.8% Using the bootstrap, we can apply the percentile method to compute the confidence bounds of the performance estimate. We pick our lower and upper confidence bounds as follows: ACC_{lower} = \alpha_1 th percentile of the ACC_{boot} distribution ACC_{upper} = \alpha_2 th percentile of the ACC_{boot} distribution where \alpha_1 = \alpha and \alpha_2 = 1-\alpha , so that we obtain the 100 \times (1-2 \times \alpha) % confidence interval for a chosen degree of confidence.
For instance, to compute a 95% confidence interval, we pick \alpha=0.025 to obtain the 2.5th and 97.5th percentiles of the distribution of the b bootstrap replicates as the lower and upper confidence bounds. import matplotlib.pyplot as plt %matplotlib inline accuracies = cross_val_score(lr, X, y, cv=BootstrapOutOfBag(n_splits=1000, random_seed=456)) mean = np.mean(accuracies) lower = np.percentile(accuracies, 2.5) upper = np.percentile(accuracies, 97.5) fig, ax = plt.subplots(figsize=(8, 4)) ax.vlines(mean, [0], 40, lw=2.5, linestyle='-', label='mean') ax.vlines(lower, [0], 15, lw=2.5, linestyle='-.', label='CI95 percentile') ax.vlines(upper, [0], 15, lw=2.5, linestyle='-.') ax.hist(accuracies, bins=11, color='#0080ff', edgecolor=\"none\", alpha=0.3) plt.legend(loc='upper left') plt.show()","title":"Example 1 -- Evaluating the predictive performance of a model"},{"location":"user_guide/evaluate/BootstrapOutOfBag/#api","text":"BootstrapOutOfBag(n_splits=200, random_seed=None) Parameters n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. Returns train_idx : ndarray The training set indices for that split. test_idx : ndarray The testing set indices for that split. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/BootstrapOutOfBag/","title":"API"},{"location":"user_guide/evaluate/BootstrapOutOfBag/#methods","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility with scikit-learn. y : object Always ignored, exists for compatibility with scikit-learn. groups : object Always ignored, exists for compatibility with scikit-learn. Returns n_splits : int Returns the number of splitting iterations in the cross-validator. split(X, y=None, groups=None) y : array-like or None (default: None) Argument is not used and only included as a parameter for compatibility, similar to KFold in scikit-learn. groups : array-like or None (default: None) Argument is not used and only included as a parameter for compatibility, similar to KFold in scikit-learn.","title":"Methods"},{"location":"user_guide/evaluate/PredefinedHoldoutSplit/","text":"PredefinedHoldoutSplit Split a dataset into a train and validation subset for validation based on user-specified indices. from mlxtend.evaluate import PredefinedHoldoutSplit Overview The PredefinedHoldoutSplit class serves as an alternative to scikit-learn's KFold class, where the PredefinedHoldoutSplit class splits a dataset into a training and a validation subset without rotation, based on validation indices specified by the user. The PredefinedHoldoutSplit can be used as an argument for the cv parameter in scikit-learn's GridSearchCV etc. For performing a random split, see the related RandomHoldoutSplit class.
Example 1 -- Iterating Over a PredefinedHoldoutSplit from mlxtend.evaluate import PredefinedHoldoutSplit from mlxtend.data import iris_data X, y = iris_data() h_iter = PredefinedHoldoutSplit(valid_indices=[0, 1, 99]) cnt = 0 for train_ind, valid_ind in h_iter.split(X, y): cnt += 1 print(cnt) 1 print(train_ind[:5]) print(valid_ind[:5]) [2 3 4 5 6] [ 0 1 99] Example 2 -- PredefinedHoldoutSplit in GridSearch from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KNeighborsClassifier from mlxtend.evaluate import PredefinedHoldoutSplit from mlxtend.data import iris_data X, y = iris_data() params = {'n_neighbors': [1, 2, 3, 4, 5]} grid = GridSearchCV(KNeighborsClassifier(), param_grid=params, cv=PredefinedHoldoutSplit(valid_indices=[0, 1, 99])) grid.fit(X, y) assert grid.n_splits_ == 1 print(grid.grid_scores_) [mean: 1.00000, std: 0.00000, params: {'n_neighbors': 1}, mean: 1.00000, std: 0.00000, params: {'n_neighbors': 2}, mean: 1.00000, std: 0.00000, params: {'n_neighbors': 3}, mean: 1.00000, std: 0.00000, params: {'n_neighbors': 4}, mean: 1.00000, std: 0.00000, params: {'n_neighbors': 5}] /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/model_selection/_search.py:762: DeprecationWarning: The grid_scores_ attribute was deprecated in version 0.18 in favor of the more elaborate cv_results_ attribute. The grid_scores_ attribute will not be available from 0.20 DeprecationWarning) API PredefinedHoldoutSplit(valid_indices) Train/Validation set splitter for sklearn's GridSearchCV etc. Uses user-specified train/validation set indices to split a dataset into train/validation sets. Parameters valid_indices : array-like, shape (num_examples,) Indices of the training examples in the training set to be used for validation. All other indices in the training set are used for the training subset for model fitting. Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.","title":"PredefinedHoldoutSplit"},{"location":"user_guide/evaluate/PredefinedHoldoutSplit/#predefinedholdoutsplit","text":"Split a dataset into a train and validation subset for validation based on user-specified indices. from mlxtend.evaluate import PredefinedHoldoutSplit","title":"PredefinedHoldoutSplit"},{"location":"user_guide/evaluate/PredefinedHoldoutSplit/#overview","text":"The PredefinedHoldoutSplit class serves as an alternative to scikit-learn's KFold class, where the PredefinedHoldoutSplit class splits a dataset into a training and a validation subset without rotation, based on validation indices specified by the user.
The PredefinedHoldoutSplit can be used as an argument for the cv parameter in scikit-learn's GridSearchCV etc. For performing a random split, see the related RandomHoldoutSplit class.","title":"Overview"},{"location":"user_guide/evaluate/PredefinedHoldoutSplit/#example-1-iterating-over-a-predefinedholdoutsplit","text":"from mlxtend.evaluate import PredefinedHoldoutSplit from mlxtend.data import iris_data X, y = iris_data() h_iter = PredefinedHoldoutSplit(valid_indices=[0, 1, 99]) cnt = 0 for train_ind, valid_ind in h_iter.split(X, y): cnt += 1 print(cnt) 1 print(train_ind[:5]) print(valid_ind[:5]) [2 3 4 5 6] [ 0 1 99]","title":"Example 1 -- Iterating Over a PredefinedHoldoutSplit"},{"location":"user_guide/evaluate/PredefinedHoldoutSplit/#example-2-predefinedholdoutsplit-in-gridsearch","text":"from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KNeighborsClassifier from mlxtend.evaluate import PredefinedHoldoutSplit from mlxtend.data import iris_data X, y = iris_data() params = {'n_neighbors': [1, 2, 3, 4, 5]} grid = GridSearchCV(KNeighborsClassifier(), param_grid=params, cv=PredefinedHoldoutSplit(valid_indices=[0, 1, 99])) grid.fit(X, y) assert grid.n_splits_ == 1 print(grid.grid_scores_) [mean: 1.00000, std: 0.00000, params: {'n_neighbors': 1}, mean: 1.00000, std: 0.00000, params: {'n_neighbors': 2}, mean: 1.00000, std: 0.00000, params: {'n_neighbors': 3}, mean: 1.00000, std: 0.00000, params: {'n_neighbors': 4}, mean: 1.00000, std: 0.00000, params: {'n_neighbors': 5}] /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/model_selection/_search.py:762: DeprecationWarning: The grid_scores_ attribute was deprecated in version 0.18 in favor of the more elaborate cv_results_ attribute. The grid_scores_ attribute will not be available from 0.20 DeprecationWarning)","title":"Example 2 -- PredefinedHoldoutSplit in GridSearch"},{"location":"user_guide/evaluate/PredefinedHoldoutSplit/#api","text":"PredefinedHoldoutSplit(valid_indices) Train/Validation set splitter for sklearn's GridSearchCV etc. Uses user-specified train/validation set indices to split a dataset into train/validation sets. Parameters valid_indices : array-like, shape (num_examples,) Indices of the training examples in the training set to be used for validation. All other indices in the training set are used for the training subset for model fitting.","title":"API"},{"location":"user_guide/evaluate/PredefinedHoldoutSplit/#methods","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split.
valid_index : ndarray The validation set indices for that split.","title":"Methods"},{"location":"user_guide/evaluate/RandomHoldoutSplit/","text":"RandomHoldoutSplit Randomly split a dataset into a train and validation subset for validation. from mlxtend.evaluate import RandomHoldoutSplit Overview The RandomHoldoutSplit class serves as an alternative to scikit-learn's KFold class, where the RandomHoldoutSplit class splits a dataset into a training and a validation subset without rotation. The RandomHoldoutSplit can be used as an argument for the cv parameter in scikit-learn's GridSearchCV etc. The term \"random\" in RandomHoldoutSplit comes from the fact that the split is specified by the random_seed rather than specifying the training and validation set indices manually as in the PredefinedHoldoutSplit class in mlxtend. Example 1 -- Iterating Over a RandomHoldoutSplit from mlxtend.evaluate import RandomHoldoutSplit from mlxtend.data import iris_data X, y = iris_data() h_iter = RandomHoldoutSplit(valid_size=0.3, random_seed=123) cnt = 0 for train_ind, valid_ind in h_iter.split(X, y): cnt += 1 print(cnt) 1 print(train_ind[:5]) print(valid_ind[:5]) [ 60 16 88 130 6] [ 72 125 80 86 117] Example 2 -- RandomHoldoutSplit in GridSearch from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KNeighborsClassifier from mlxtend.evaluate import RandomHoldoutSplit from mlxtend.data import iris_data X, y = iris_data() params = {'n_neighbors': [1, 2, 3, 4, 5]} grid = GridSearchCV(KNeighborsClassifier(), param_grid=params, cv=RandomHoldoutSplit(valid_size=0.3, random_seed=123)) grid.fit(X, y) assert grid.n_splits_ == 1 print(grid.grid_scores_) [mean: 0.95556, std: 0.00000, params: {'n_neighbors': 1}, mean: 0.95556, std: 0.00000, params: {'n_neighbors': 2}, mean: 0.95556, std: 0.00000, params: {'n_neighbors': 3}, mean: 0.95556, std: 0.00000, params: {'n_neighbors': 4}, mean: 0.95556, std: 0.00000, params: {'n_neighbors': 5}] /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/model_selection/_search.py:762: DeprecationWarning: The grid_scores_ attribute was deprecated in version 0.18 in favor of the more elaborate cv_results_ attribute. The grid_scores_ attribute will not be available from 0.20 DeprecationWarning) API RandomHoldoutSplit(valid_size=0.5, random_seed=None, stratify=False) Train/Validation set splitter for sklearn's GridSearchCV etc. Provides train/validation set indices to split a dataset into train/validation sets using random indices. Parameters valid_size : float (default: 0.5) Proportion of examples to be assigned as validation examples. The remaining proportion, 1 - valid_size , will then automatically be assigned as training set examples. random_seed : int (default: None) The random seed for splitting the data into training and validation set partitions. stratify : bool (default: False) True or False, whether to perform a stratified split or not Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set.
Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of training examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.","title":"RandomHoldoutSplit"},{"location":"user_guide/evaluate/RandomHoldoutSplit/#randomholdoutsplit","text":"Randomly split a dataset into a train and validation subset for validation. from mlxtend.evaluate import RandomHoldoutSplit","title":"RandomHoldoutSplit"},{"location":"user_guide/evaluate/RandomHoldoutSplit/#overview","text":"The RandomHoldoutSplit class serves as an alternative to scikit-learn's KFold class, where the RandomHoldoutSplit class splits a dataset into a training and a validation subset without rotation. The RandomHoldoutSplit can be used as an argument for the cv parameter in scikit-learn's GridSearchCV etc. The term \"random\" in RandomHoldoutSplit comes from the fact that the split is specified by the random_seed rather than specifying the training and validation set indices manually as in the PredefinedHoldoutSplit class in mlxtend.","title":"Overview"},{"location":"user_guide/evaluate/RandomHoldoutSplit/#example-1-iterating-over-a-randomholdoutsplit","text":"from mlxtend.evaluate import RandomHoldoutSplit from mlxtend.data import iris_data X, y = iris_data() h_iter = RandomHoldoutSplit(valid_size=0.3, random_seed=123) cnt = 0 for train_ind, valid_ind in h_iter.split(X, y): cnt += 1 print(cnt) 1 print(train_ind[:5]) print(valid_ind[:5]) [ 60 16 88 130 6] [ 72 125 80 86 117]","title":"Example 1 -- Iterating Over a RandomHoldoutSplit"},{"location":"user_guide/evaluate/RandomHoldoutSplit/#example-2-randomholdoutsplit-in-gridsearch","text":"from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KNeighborsClassifier from mlxtend.evaluate import RandomHoldoutSplit from mlxtend.data import iris_data X, y = iris_data() params = {'n_neighbors': [1, 2, 3, 4, 5]} grid = GridSearchCV(KNeighborsClassifier(), param_grid=params, cv=RandomHoldoutSplit(valid_size=0.3, random_seed=123)) grid.fit(X, y) assert grid.n_splits_ == 1 print(grid.grid_scores_) [mean: 0.95556, std: 0.00000, params: {'n_neighbors': 1}, mean: 0.95556, std: 0.00000, params: {'n_neighbors': 2}, mean: 0.95556, std: 0.00000, params: {'n_neighbors': 3}, mean: 0.95556, std: 0.00000, params: {'n_neighbors': 4}, mean: 0.95556, std: 0.00000, params: {'n_neighbors': 5}] /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/model_selection/_search.py:762: DeprecationWarning: The grid_scores_ attribute was deprecated in version 0.18 in favor of the more elaborate cv_results_ attribute. The grid_scores_ attribute will not be available from 0.20 DeprecationWarning)","title":"Example 2 -- RandomHoldoutSplit in GridSearch"},{"location":"user_guide/evaluate/RandomHoldoutSplit/#api","text":"RandomHoldoutSplit(valid_size=0.5, random_seed=None, stratify=False) Train/Validation set splitter for sklearn's GridSearchCV etc. Provides train/validation set indices to split a dataset into train/validation sets using random indices. Parameters valid_size : float (default: 0.5) Proportion of examples to be assigned as validation examples.
1 - valid_size will then automatically be assigned as training set examples. random_seed : int (default: None) The random seed for splitting the data into training and validation set partitions. stratify : bool (default: False) Whether to perform a stratified split or not.","title":"API"},{"location":"user_guide/evaluate/RandomHoldoutSplit/#methods","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of training examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.","title":"Methods"},{"location":"user_guide/evaluate/bootstrap/","text":"Bootstrap An implementation of the ordinary nonparametric bootstrap to bootstrap a single statistic (for example, the mean, median, R^2 of a regression fit, and so forth). from mlxtend.evaluate import bootstrap Overview The bootstrap offers an easy and effective way to estimate the distribution of a statistic via simulation, by drawing (or generating) new samples from an existing sample with replacement. Note that the bootstrap does not require making any assumptions about the sample statistic or dataset being normally distributed. Using the bootstrap, we can estimate sample statistics and compute the standard error of the mean and confidence intervals as if we had drawn a number of samples from an infinite population. In a nutshell, the bootstrap procedure can be described as follows: Draw a sample with replacement Compute the sample statistic Repeat steps 1-2 n times Compute the standard deviation (standard error of the mean of the statistic) Compute the confidence interval Or, in simple terms, we can interpret the bootstrap as a means of drawing a potentially endless number of (new) samples from a population by resampling the original dataset. Note that the term \"bootstrap replicate\" is being used quite loosely in current literature; many researchers and practitioners use it to define the number of bootstrap samples we draw from the original dataset. However, in the context of this documentation and the code annotation, we use the original definition of bootstrap replicates and use it to refer to the statistic computed from a bootstrap sample. References [1] Efron, Bradley, and Robert J. Tibshirani. An introduction to the bootstrap. CRC press, 1994. Example 1 -- Bootstrapping the Mean This simple example illustrates how you could bootstrap the mean of a sample.
import numpy as np from mlxtend.evaluate import bootstrap rng = np.random.RandomState(123) x = rng.normal(loc=5., size=100) original, std_err, ci_bounds = bootstrap(x, num_rounds=1000, func=np.mean, ci=0.95, seed=123) print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, std_err, ci_bounds[0], ci_bounds[1])) Mean: 5.03, SE: +/- 0.11, CI95: [4.80, 5.26] Example 2 - Bootstrapping a Regression Fit This example illustrates how you can bootstrap the R^2 of a regression fit on the training data. from mlxtend.data import autompg_data from sklearn.linear_model import LinearRegression from sklearn.metrics import r2_score X, y = autompg_data() lr = LinearRegression() def r2_fit(X, model=lr): x, y = X[:, 0].reshape(-1, 1), X[:, 1] pred = model.fit(x, y).predict(x) return r2_score(y, pred) original, std_err, ci_bounds = bootstrap(X, num_rounds=1000, func=r2_fit, ci=0.95, seed=123) print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, std_err, ci_bounds[0], ci_bounds[1])) Mean: 0.90, SE: +/- 0.01, CI95: [0.89, 0.92] API bootstrap(x, func, num_rounds=1000, ci=0.95, ddof=1, seed=None) Implements the ordinary nonparametric bootstrap Parameters x : NumPy array, shape=(n_samples, [n_columns]) A one- or multi-dimensional array of data records func : A function which computes a statistic that is used to compute the bootstrap replicates (the statistic computed from the bootstrap samples). This function must return a scalar value. For example, np.mean or np.median would be an acceptable argument for func if x is a 1-dimensional array or vector. num_rounds : int (default=1000) The number of bootstrap samples to draw where each bootstrap sample has the same number of records as the original dataset. ci : float (default=0.95) A value in the range (0, 1) that represents the confidence level for computing the confidence interval. For example, ci=0.95 (default) will compute the 95% confidence interval from the bootstrap replicates. ddof : int The delta degrees of freedom used when computing the standard error. seed : int or None (default=None) Random seed for generating bootstrap samples. Returns original, standard_error, (lower_ci, upper_ci) : tuple Returns the statistic of the original sample ( original ), the standard error of the estimate, and the respective confidence interval bounds. Examples >>> from mlxtend.evaluate import bootstrap >>> rng = np.random.RandomState(123) >>> x = rng.normal(loc=5., size=100) >>> original, std_err, ci_bounds = bootstrap(x, ... num_rounds=1000, ... func=np.mean, ... ci=0.95, ... seed=123) >>> print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, ... std_err, ... ci_bounds[0], ... ci_bounds[1])) Mean: 5.03, SE: +/- 0.11, CI95: [4.80, 5.26] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap/","title":"Bootstrap"},{"location":"user_guide/evaluate/bootstrap/#bootstrap","text":"An implementation of the ordinary nonparametric bootstrap to bootstrap a single statistic (for example, the mean, median, R^2 of a regression fit, and so forth). from mlxtend.evaluate import bootstrap","title":"Bootstrap"},{"location":"user_guide/evaluate/bootstrap/#overview","text":"The bootstrap offers an easy and effective way to estimate the distribution of a statistic via simulation, by drawing (or generating) new samples from an existing sample with replacement. Note that the bootstrap does not require making any assumptions about the sample statistic or dataset being normally distributed.
Using the bootstrap, we can estimate sample statistics and compute the standard error of the mean and confidence intervals as if we had drawn a number of samples from an infinite population. In a nutshell, the bootstrap procedure can be described as follows: Draw a sample with replacement Compute the sample statistic Repeat steps 1-2 n times Compute the standard deviation (standard error of the mean of the statistic) Compute the confidence interval Or, in simple terms, we can interpret the bootstrap as a means of drawing a potentially endless number of (new) samples from a population by resampling the original dataset. Note that the term \"bootstrap replicate\" is being used quite loosely in current literature; many researchers and practitioners use it to define the number of bootstrap samples we draw from the original dataset. However, in the context of this documentation and the code annotation, we use the original definition of bootstrap replicates and use it to refer to the statistic computed from a bootstrap sample.","title":"Overview"},{"location":"user_guide/evaluate/bootstrap/#references","text":"[1] Efron, Bradley, and Robert J. Tibshirani. An introduction to the bootstrap. CRC press, 1994.","title":"References"},{"location":"user_guide/evaluate/bootstrap/#example-1-bootstrapping-the-mean","text":"This simple example illustrates how you could bootstrap the mean of a sample. import numpy as np from mlxtend.evaluate import bootstrap rng = np.random.RandomState(123) x = rng.normal(loc=5., size=100) original, std_err, ci_bounds = bootstrap(x, num_rounds=1000, func=np.mean, ci=0.95, seed=123) print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, std_err, ci_bounds[0], ci_bounds[1])) Mean: 5.03, SE: +/- 0.11, CI95: [4.80, 5.26]","title":"Example 1 -- Bootstrapping the Mean"},{"location":"user_guide/evaluate/bootstrap/#example-2-bootstrapping-a-regression-fit","text":"This example illustrates how you can bootstrap the R^2 of a regression fit on the training data. from mlxtend.data import autompg_data from sklearn.linear_model import LinearRegression from sklearn.metrics import r2_score X, y = autompg_data() lr = LinearRegression() def r2_fit(X, model=lr): x, y = X[:, 0].reshape(-1, 1), X[:, 1] pred = model.fit(x, y).predict(x) return r2_score(y, pred) original, std_err, ci_bounds = bootstrap(X, num_rounds=1000, func=r2_fit, ci=0.95, seed=123) print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, std_err, ci_bounds[0], ci_bounds[1])) Mean: 0.90, SE: +/- 0.01, CI95: [0.89, 0.92]","title":"Example 2 - Bootstrapping a Regression Fit"},{"location":"user_guide/evaluate/bootstrap/#api","text":"bootstrap(x, func, num_rounds=1000, ci=0.95, ddof=1, seed=None) Implements the ordinary nonparametric bootstrap Parameters x : NumPy array, shape=(n_samples, [n_columns]) A one- or multi-dimensional array of data records func : A function which computes a statistic that is used to compute the bootstrap replicates (the statistic computed from the bootstrap samples). This function must return a scalar value. For example, np.mean or np.median would be an acceptable argument for func if x is a 1-dimensional array or vector. num_rounds : int (default=1000) The number of bootstrap samples to draw where each bootstrap sample has the same number of records as the original dataset. ci : float (default=0.95) A value in the range (0, 1) that represents the confidence level for computing the confidence interval.
For example, ci=0.95 (default) will compute the 95% confidence interval from the bootstrap replicates. ddof : int The delta degrees of freedom used when computing the standard error. seed : int or None (default=None) Random seed for generating bootstrap samples. Returns original, standard_error, (lower_ci, upper_ci) : tuple Returns the statistic of the original sample ( original ), the standard error of the estimate, and the respective confidence interval bounds. Examples >>> from mlxtend.evaluate import bootstrap >>> rng = np.random.RandomState(123) >>> x = rng.normal(loc=5., size=100) >>> original, std_err, ci_bounds = bootstrap(x, ... num_rounds=1000, ... func=np.mean, ... ci=0.95, ... seed=123) >>> print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, ... std_err, ... ci_bounds[0], ... ci_bounds[1])) Mean: 5.03, SE: +/- 0.11, CI95: [4.80, 5.26] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap/","title":"API"},{"location":"user_guide/evaluate/bootstrap_point632_score/","text":"bootstrap_point632_score An implementation of the .632 bootstrap to evaluate supervised learning algorithms. from mlxtend.evaluate import bootstrap_point632_score Overview Originally, the bootstrap method aims to determine the statistical properties of an estimator when the underlying distribution is unknown and additional samples are not available. Now, in order to exploit this method for the evaluation of predictive models, such as hypotheses for classification and regression, we may prefer a slightly different approach to bootstrapping using the so-called Out-Of-Bag (OOB) or Leave-One-Out Bootstrap (LOOB) technique. Here, we use out-of-bag samples as test sets for evaluation instead of evaluating the model on the training data. Out-of-bag samples are the unique sets of instances that are not used for model fitting as shown in the figure below [1]. The figure above illustrates how three random bootstrap samples drawn from an exemplary ten-sample dataset ( X_1, X_2, ..., X_{10} ) and their out-of-bag samples for testing may look. In practice, Bradley Efron and Robert Tibshirani recommend drawing 50 to 200 bootstrap samples as being sufficient for reliable estimates [2]. .632 Bootstrap In 1983, Bradley Efron described the .632 Estimate , a further improvement to address the pessimistic bias of the bootstrap cross-validation approach described above [3]. The pessimistic bias in the \"classic\" bootstrap method can be attributed to the fact that the bootstrap samples only contain approximately 63.2% of the unique samples from the original dataset. For instance, we can compute the probability that a given sample from a dataset of size n is not drawn as a bootstrap sample as P (\\text{not chosen}) = \\bigg(1 - \\frac{1}{n}\\bigg)^n, which is asymptotically equivalent to \\frac{1}{e} \\approx 0.368 as n \\rightarrow \\infty. Vice versa, we can then compute the probability that a sample is chosen as P (\\text{chosen}) = 1 - \\bigg(1 - \\frac{1}{n}\\bigg)^n \\approx 0.632 for reasonably large datasets, so that we'd select approximately 0.632 \\times n unique samples as bootstrap training sets and reserve 0.368 \\times n out-of-bag samples for testing in each iteration.
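As a quick numerical sanity check of the 0.632/0.368 derivation above, the probabilities can be evaluated directly for a finite n (an illustrative snippet, independent of mlxtend):
import numpy as np

n = 1000  # dataset size
p_not_chosen = (1 - 1/n) ** n  # probability that a given example is never drawn
p_chosen = 1 - p_not_chosen    # probability that it appears in a bootstrap sample
print('not chosen: %.4f (1/e ~ %.4f)' % (p_not_chosen, 1/np.e))  # ~0.3677 vs. ~0.3679
print('chosen: %.4f' % p_chosen)                                 # ~0.6323, the familiar 0.632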
Now, to address the bias that is due to this sampling with replacement, Bradley Efron proposed the .632 Estimate that we mentioned earlier, which is computed via the following equation: \\text{ACC}_{boot} = \\frac{1}{b} \\sum_{i=1}^b \\big(0.632 \\cdot \\text{ACC}_{h, i} + 0.368 \\cdot \\text{ACC}_{r, i}\\big), where \\text{ACC}_{r, i} is the resubstitution accuracy, and \\text{ACC}_{h, i} is the accuracy on the out-of-bag sample. .632+ Bootstrap Now, while the .632 Bootstrap attempts to address the pessimistic bias of the estimate, an optimistic bias may occur with models that tend to overfit so that Bradley Efron and Robert Tibshirani proposed the .632+ Bootstrap Method (Efron and Tibshirani, 1997). Instead of using a fixed \"weight\" \\omega = 0.632 in ACC_{\\text{boot}} = \\frac{1}{b} \\sum_{i=1}^b \\big(\\omega \\cdot \\text{ACC}_{h, i} + (1-\\omega) \\cdot \\text{ACC}_{r, i} \\big), we compute the weight \\omega as \\omega = \\frac{0.632}{1 - 0.368 \\times R}, where R is the relative overfitting rate R = \\frac{(-1) \\times (\\text{ACC}_{h, i} - \\text{ACC}_{r, i})}{\\gamma - (1 -\\text{ACC}_{h, i})}. (Since we are plugging \\omega into the equation for computing ACC_{boot} that we defined above, \\text{ACC}_{h, i} and \\text{ACC}_{r, i} still refer to the out-of-bag and resubstitution accuracy estimates in the i th bootstrap round, respectively.) Further, we need to determine the no-information rate \\gamma in order to compute R . For instance, we can compute \\gamma by fitting a model to a dataset that contains all possible combinations between samples x_{i'} and target class labels y_{i} \u2014 we pretend that the observations and class labels are independent: \\gamma = \\frac{1}{n^2} \\sum_{i=1}^{n} \\sum_{i '=1}^{n} L(y_{i}, f(x_{i '})). Alternatively, we can estimate the no-information rate \\gamma as follows: \\gamma = \\sum_{k=1}^K p_k (1 - q_k), where p_k is the proportion of class k samples observed in the dataset, and q_k is the proportion of class k samples that the classifier predicts in the dataset. References [1] https://sebastianraschka.com/blog/2016/model-evaluation-selection-part2.html [2] Efron, Bradley, and Robert J. Tibshirani. An introduction to the bootstrap. CRC press, 1994. [3] Efron, Bradley. 1983. \u201cEstimating the Error Rate of a Prediction Rule: Improvement on Cross-Validation.\u201d Journal of the American Statistical Association 78 (382): 316. doi:10.2307/2288636. [4] Efron, Bradley, and Robert Tibshirani. 1997. \u201cImprovements on Cross-Validation: The .632+ Bootstrap Method.\u201d Journal of the American Statistical Association 92 (438): 548. doi:10.2307/2965703.
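To make the weighting scheme concrete, the .632 combination of per-round accuracies is a single weighted average; the sketch below uses hypothetical accuracy values (purely illustrative, not output of mlxtend) and mirrors the ACC_boot equation above:
import numpy as np

# hypothetical per-round accuracies for b = 3 bootstrap rounds
acc_oob = np.array([0.92, 0.90, 0.93])    # ACC_{h,i}: out-of-bag accuracy
acc_resub = np.array([0.99, 0.98, 0.99])  # ACC_{r,i}: resubstitution accuracy

# .632 estimate: average of the weighted per-round accuracies
acc_632 = np.mean(0.632 * acc_oob + 0.368 * acc_resub)
print('ACC_boot (.632): %.4f' % acc_632)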
Example 1 -- Evaluating the predictive performance of a model via the classic out-of-bag Bootstrap The bootstrap_point632_score function mimics the behavior of scikit-learn's cross_val_score , and a typical usage example is shown below: from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from mlxtend.evaluate import bootstrap_point632_score import numpy as np iris = datasets.load_iris() X = iris.data y = iris.target tree = DecisionTreeClassifier(random_state=0) # Model accuracy scores = bootstrap_point632_score(tree, X, y, method='oob') acc = np.mean(scores) print('Accuracy: %.2f%%' % (100*acc)) # Confidence interval lower = np.percentile(scores, 2.5) upper = np.percentile(scores, 97.5) print('95%% Confidence interval: [%.2f, %.2f]' % (100*lower, 100*upper)) Accuracy: 94.52% 95% Confidence interval: [88.88, 98.28] Example 2 -- Evaluating the predictive performance of a model via the .632 Bootstrap from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from mlxtend.evaluate import bootstrap_point632_score import numpy as np iris = datasets.load_iris() X = iris.data y = iris.target tree = DecisionTreeClassifier(random_state=0) # Model accuracy scores = bootstrap_point632_score(tree, X, y) acc = np.mean(scores) print('Accuracy: %.2f%%' % (100*acc)) # Confidence interval lower = np.percentile(scores, 2.5) upper = np.percentile(scores, 97.5) print('95%% Confidence interval: [%.2f, %.2f]' % (100*lower, 100*upper)) Accuracy: 96.58% 95% Confidence interval: [92.37, 98.97] Example 3 -- Evaluating the predictive performance of a model via the .632+ Bootstrap from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from mlxtend.evaluate import bootstrap_point632_score import numpy as np iris = datasets.load_iris() X = iris.data y = iris.target tree = DecisionTreeClassifier(random_state=0) # Model accuracy scores = bootstrap_point632_score(tree, X, y, method='.632+') acc = np.mean(scores) print('Accuracy: %.2f%%' % (100*acc)) # Confidence interval lower = np.percentile(scores, 2.5) upper = np.percentile(scores, 97.5) print('95%% Confidence interval: [%.2f, %.2f]' % (100*lower, 100*upper)) Accuracy: 96.40% 95% Confidence interval: [92.34, 99.00] API bootstrap_point632_score(estimator, X, y, n_splits=200, method='.632', scoring_func=None, random_seed=None, clone_estimator=True) Implementation of the .632 [1] and .632+ [2] bootstrap for supervised learning References: [1] Efron, Bradley. 1983. \u201cEstimating the Error Rate of a Prediction Rule: Improvement on Cross-Validation.\u201d Journal of the American Statistical Association 78 (382): 316. doi:10.2307/2288636. [2] Efron, Bradley, and Robert Tibshirani. 1997. \u201cImprovements on Cross-Validation: The .632+ Bootstrap Method.\u201d Journal of the American Statistical Association 92 (438): 548. doi:10.2307/2965703. Parameters estimator : object An estimator for classification or regression that follows the scikit-learn API and implements \"fit\" and \"predict\" methods. X : array-like The data to fit. Can be, for example, a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. method : str (default='.632') The bootstrap method, which can be either - 1) '.632' bootstrap (default) - 2) '.632+' bootstrap - 3) 'oob' (regular out-of-bag, no weighting) for comparison studies.
scoring_func : callable Score function (or loss function) with signature scoring_func(y, y_pred, **kwargs) . If None, uses classification accuracy if the estimator is a classifier and mean squared error if the estimator is a regressor. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. clone_estimator : bool (default=True) Clones the estimator if true, otherwise fits the original. Returns scores : array of float, shape=(len(list(n_splits)),) Array of scores of the estimator for each bootstrap replicate. Examples >>> from sklearn import datasets, linear_model >>> from mlxtend.evaluate import bootstrap_point632_score >>> iris = datasets.load_iris() >>> X = iris.data >>> y = iris.target >>> lr = linear_model.LogisticRegression() >>> scores = bootstrap_point632_score(lr, X, y) >>> acc = np.mean(scores) >>> print('Accuracy:', acc) 0.953023146884 >>> lower = np.percentile(scores, 2.5) >>> upper = np.percentile(scores, 97.5) >>> print('95%% Confidence interval: [%.2f, %.2f]' % (lower, upper)) 95% Confidence interval: [0.90, 0.98] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap_point632_score/","title":"bootstrap_point632_score"},{"location":"user_guide/evaluate/bootstrap_point632_score/#bootstrap_point632_score","text":"An implementation of the .632 bootstrap to evaluate supervised learning algorithms. from mlxtend.evaluate import bootstrap_point632_score","title":"bootstrap_point632_score"},{"location":"user_guide/evaluate/bootstrap_point632_score/#overview","text":"Originally, the bootstrap method aims to determine the statistical properties of an estimator when the underlying distribution is unknown and additional samples are not available. Now, in order to exploit this method for the evaluation of predictive models, such as hypotheses for classification and regression, we may prefer a slightly different approach to bootstrapping using the so-called Out-Of-Bag (OOB) or Leave-One-Out Bootstrap (LOOB) technique. Here, we use out-of-bag samples as test sets for evaluation instead of evaluating the model on the training data. Out-of-bag samples are the unique sets of instances that are not used for model fitting as shown in the figure below [1]. The figure above illustrates how three random bootstrap samples drawn from an exemplary ten-sample dataset ( X_1, X_2, ..., X_{10} ) and their out-of-bag samples for testing may look. In practice, Bradley Efron and Robert Tibshirani recommend drawing 50 to 200 bootstrap samples as being sufficient for reliable estimates [2].","title":"Overview"},{"location":"user_guide/evaluate/bootstrap_point632_score/#632-bootstrap","text":"In 1983, Bradley Efron described the .632 Estimate , a further improvement to address the pessimistic bias of the bootstrap cross-validation approach described above [3]. The pessimistic bias in the \"classic\" bootstrap method can be attributed to the fact that the bootstrap samples only contain approximately 63.2% of the unique samples from the original dataset. For instance, we can compute the probability that a given sample from a dataset of size n is not drawn as a bootstrap sample as P (\\text{not chosen}) = \\bigg(1 - \\frac{1}{n}\\bigg)^n, which is asymptotically equivalent to \\frac{1}{e} \\approx 0.368 as n \\rightarrow \\infty.
Vice versa, we can then compute the probability that a sample is chosen as P (\\text{chosen}) = 1 - \\bigg(1 - \\frac{1}{n}\\bigg)^n \\approx 0.632 for reasonably large datasets, so that we'd select approximately 0.632 \\times n unique samples as bootstrap training sets and reserve 0.368 \\times n out-of-bag samples for testing in each iteration. Now, to address the bias that is due to this sampling with replacement, Bradley Efron proposed the .632 Estimate that we mentioned earlier, which is computed via the following equation: \\text{ACC}_{boot} = \\frac{1}{b} \\sum_{i=1}^b \\big(0.632 \\cdot \\text{ACC}_{h, i} + 0.368 \\cdot \\text{ACC}_{r, i}\\big), where \\text{ACC}_{r, i} is the resubstitution accuracy, and \\text{ACC}_{h, i} is the accuracy on the out-of-bag sample.","title":".632 Bootstrap"},{"location":"user_guide/evaluate/bootstrap_point632_score/#632-bootstrap_1","text":"Now, while the .632 Bootstrap attempts to address the pessimistic bias of the estimate, an optimistic bias may occur with models that tend to overfit so that Bradley Efron and Robert Tibshirani proposed the .632+ Bootstrap Method (Efron and Tibshirani, 1997). Instead of using a fixed \"weight\" \\omega = 0.632 in ACC_{\\text{boot}} = \\frac{1}{b} \\sum_{i=1}^b \\big(\\omega \\cdot \\text{ACC}_{h, i} + (1-\\omega) \\cdot \\text{ACC}_{r, i} \\big), we compute the weight \\omega as \\omega = \\frac{0.632}{1 - 0.368 \\times R}, where R is the relative overfitting rate R = \\frac{(-1) \\times (\\text{ACC}_{h, i} - \\text{ACC}_{r, i})}{\\gamma - (1 -\\text{ACC}_{h, i})}. (Since we are plugging \\omega into the equation for computing ACC_{boot} that we defined above, \\text{ACC}_{h, i} and \\text{ACC}_{r, i} still refer to the out-of-bag and resubstitution accuracy estimates in the i th bootstrap round, respectively.) Further, we need to determine the no-information rate \\gamma in order to compute R . For instance, we can compute \\gamma by fitting a model to a dataset that contains all possible combinations between samples x_{i'} and target class labels y_{i} \u2014 we pretend that the observations and class labels are independent: \\gamma = \\frac{1}{n^2} \\sum_{i=1}^{n} \\sum_{i '=1}^{n} L(y_{i}, f(x_{i '})). Alternatively, we can estimate the no-information rate \\gamma as follows: \\gamma = \\sum_{k=1}^K p_k (1 - q_k), where p_k is the proportion of class k samples observed in the dataset, and q_k is the proportion of class k samples that the classifier predicts in the dataset.","title":".632+ Bootstrap"},{"location":"user_guide/evaluate/bootstrap_point632_score/#references","text":"[1] https://sebastianraschka.com/blog/2016/model-evaluation-selection-part2.html [2] Efron, Bradley, and Robert J. Tibshirani. An introduction to the bootstrap. CRC press, 1994. [3] Efron, Bradley. 1983. \u201cEstimating the Error Rate of a Prediction Rule: Improvement on Cross-Validation.\u201d Journal of the American Statistical Association 78 (382): 316. doi:10.2307/2288636. [4] Efron, Bradley, and Robert Tibshirani. 1997. \u201cImprovements on Cross-Validation: The .632+ Bootstrap Method.\u201d Journal of the American Statistical Association 92 (438): 548.
doi:10.2307/2965703.","title":"References"},{"location":"user_guide/evaluate/bootstrap_point632_score/#example-1-evaluating-the-predictive-performance-of-a-model-via-the-classic-out-of-bag-bootstrap","text":"The bootstrap_point632_score function mimics the behavior of scikit-learn's cross_val_score , and a typical usage example is shown below: from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from mlxtend.evaluate import bootstrap_point632_score import numpy as np iris = datasets.load_iris() X = iris.data y = iris.target tree = DecisionTreeClassifier(random_state=0) # Model accuracy scores = bootstrap_point632_score(tree, X, y, method='oob') acc = np.mean(scores) print('Accuracy: %.2f%%' % (100*acc)) # Confidence interval lower = np.percentile(scores, 2.5) upper = np.percentile(scores, 97.5) print('95%% Confidence interval: [%.2f, %.2f]' % (100*lower, 100*upper)) Accuracy: 94.52% 95% Confidence interval: [88.88, 98.28]","title":"Example 1 -- Evaluating the predictive performance of a model via the classic out-of-bag Bootstrap"},{"location":"user_guide/evaluate/bootstrap_point632_score/#example-2-evaluating-the-predictive-performance-of-a-model-via-the-632-bootstrap","text":"from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from mlxtend.evaluate import bootstrap_point632_score import numpy as np iris = datasets.load_iris() X = iris.data y = iris.target tree = DecisionTreeClassifier(random_state=0) # Model accuracy scores = bootstrap_point632_score(tree, X, y) acc = np.mean(scores) print('Accuracy: %.2f%%' % (100*acc)) # Confidence interval lower = np.percentile(scores, 2.5) upper = np.percentile(scores, 97.5) print('95%% Confidence interval: [%.2f, %.2f]' % (100*lower, 100*upper)) Accuracy: 96.58% 95% Confidence interval: [92.37, 98.97]","title":"Example 2 -- Evaluating the predictive performance of a model via the .632 Bootstrap"},{"location":"user_guide/evaluate/bootstrap_point632_score/#example-3-evaluating-the-predictive-performance-of-a-model-via-the-632-bootstrap","text":"from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from mlxtend.evaluate import bootstrap_point632_score import numpy as np iris = datasets.load_iris() X = iris.data y = iris.target tree = DecisionTreeClassifier(random_state=0) # Model accuracy scores = bootstrap_point632_score(tree, X, y, method='.632+') acc = np.mean(scores) print('Accuracy: %.2f%%' % (100*acc)) # Confidence interval lower = np.percentile(scores, 2.5) upper = np.percentile(scores, 97.5) print('95%% Confidence interval: [%.2f, %.2f]' % (100*lower, 100*upper)) Accuracy: 96.40% 95% Confidence interval: [92.34, 99.00]","title":"Example 3 -- Evaluating the predictive performance of a model via the .632+ Bootstrap"},{"location":"user_guide/evaluate/bootstrap_point632_score/#api","text":"bootstrap_point632_score(estimator, X, y, n_splits=200, method='.632', scoring_func=None, random_seed=None, clone_estimator=True) Implementation of the .632 [1] and .632+ [2] bootstrap for supervised learning References: [1] Efron, Bradley. 1983. \u201cEstimating the Error Rate of a Prediction Rule: Improvement on Cross-Validation.\u201d Journal of the American Statistical Association 78 (382): 316. doi:10.2307/2288636. [2] Efron, Bradley, and Robert Tibshirani. 1997. \u201cImprovements on Cross-Validation: The .632+ Bootstrap Method.\u201d Journal of the American Statistical Association 92 (438): 548. doi:10.2307/2965703.
Parameters estimator : object An estimator for classification or regression that follows the scikit-learn API and implements \"fit\" and \"predict\" methods. X : array-like The data to fit. Can be, for example, a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. method : str (default='.632') The bootstrap method, which can be either - 1) '.632' bootstrap (default) - 2) '.632+' bootstrap - 3) 'oob' (regular out-of-bag, no weighting) for comparison studies. scoring_func : callable Score function (or loss function) with signature scoring_func(y, y_pred, **kwargs) . If None, uses classification accuracy if the estimator is a classifier and mean squared error if the estimator is a regressor. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. clone_estimator : bool (default=True) Clones the estimator if true, otherwise fits the original. Returns scores : array of float, shape=(len(list(n_splits)),) Array of scores of the estimator for each bootstrap replicate. Examples >>> from sklearn import datasets, linear_model >>> from mlxtend.evaluate import bootstrap_point632_score >>> iris = datasets.load_iris() >>> X = iris.data >>> y = iris.target >>> lr = linear_model.LogisticRegression() >>> scores = bootstrap_point632_score(lr, X, y) >>> acc = np.mean(scores) >>> print('Accuracy:', acc) 0.953023146884 >>> lower = np.percentile(scores, 2.5) >>> upper = np.percentile(scores, 97.5) >>> print('95%% Confidence interval: [%.2f, %.2f]' % (lower, upper)) 95% Confidence interval: [0.90, 0.98] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap_point632_score/","title":"API"},{"location":"user_guide/evaluate/cochrans_q/","text":"Cochran's Q Test Cochran's Q test for comparing the performance of multiple classifiers. from mlxtend.evaluate import cochrans_q Overview Cochran's Q test can be regarded as a generalized version of McNemar's test that can be applied to evaluate multiple classifiers. In a sense, Cochran's Q test is analogous to ANOVA for binary outcomes. To compare more than two classifiers, we can use Cochran's Q test, which has a test statistic Q that is approximately (similar to McNemar's test) distributed as chi-squared with L-1 degrees of freedom, where L is the number of models we evaluate (since L=2 for McNemar's test, McNemar's test statistic approximates a chi-squared distribution with one degree of freedom). More formally, Cochran's Q test tests the hypothesis that there is no difference between the classification accuracies [1]: p_i: H_0 = p_1 = p_2 = \\cdots = p_L. Let \\{D_1, \\dots , D_L\\} be a set of classifiers that have all been tested on the same dataset. If the L classifiers don't perform differently, then the following Q statistic is distributed approximately as \"chi-squared\" with L-1 degrees of freedom: Q_C = (L-1) \\frac{L \\sum^{L}_{i=1}G_{i}^{2} - T^2}{LT - \\sum^{N_{ts}}_{j=1} (L_j)^2}. Here, G_i is the number of objects out of N_{ts} correctly classified by D_i , i = 1, \\dots, L ; L_j is the number of classifiers out of L that correctly classified object \\mathbf{z}_j \\in \\mathbf{Z}_{ts} , where \\mathbf{Z}_{ts} = \\{\\mathbf{z}_1, ...
\\mathbf{z}_{N_{ts}}\\} is the test dataset on which the classifiers are tested; and T is the total number of correct votes among the L classifiers [2]: T = \\sum_{i=1}^{L} G_i = \\sum^{N_{ts}}_{j=1} L_j. To perform Cochran's Q test, we typically organize the classifier predictions in a binary N_{ts} \\times L matrix. The ij\\text{th} entry of such a matrix is 0 if a classifier D_j has misclassified a data example (vector) \\mathbf{z}_i and 1 otherwise (if the classifier predicted the class label l(\\mathbf{z}_i) correctly) [2]. The following example taken from [2] illustrates how the classification results may be organized. For instance, assume we have the ground truth labels of the test dataset y_true and the following predictions by 3 classifiers ( y_model_1 , y_model_2 , and y_model_3 ): y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_1 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_2 = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_3 = np.array([1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]) The table of correct (1) and incorrect (0) classifications may then look as follows: D_1 (model 1) D_2 (model 2) D_3 (model 3) Occurrences 1 1 1 80 1 1 0 2 1 0 1 0 1 0 0 2 0 1 1 9 0 1 0 1 0 0 1 3 0 0 0 3 Accuracy 84/100*100% = 84% 92/100*100% = 92% 92/100*100% = 92% By plugging in the respective values into the previous equation, we obtain the following Q value [2]: Q_c = 2 \\times \\frac{3 \\times (84^2 + 92^2 + 92^2) - 268^2}{3\\times 268-(80 \\times 9 + 11 \\times 4 + 6 \\times 1)} \\approx 7.5294. (Note that the Q value in [2] is listed as 3.7647 due to a typo; as discussed with the author, the value 7.5294 is the correct one.) Now, the Q value (approximating \\chi^2 ) corresponds to a p-value of approx. 0.023 assuming a \\chi^2 distribution with L-1 = 2 degrees of freedom. Assuming that we chose a significance level of \\alpha=0.05 , we would reject the null hypothesis that all classifiers perform equally well, since 0.023 < \\alpha . In practice, if we successfully rejected the null hypothesis, we could perform multiple post hoc pair-wise tests -- for example, McNemar tests with a Bonferroni correction -- to determine which pairs have different population proportions. References [1] Fleiss, Joseph L., Bruce Levin, and Myunghee Cho Paik. Statistical methods for rates and proportions. John Wiley & Sons, 2013. [2] Kuncheva, Ludmila I. Combining pattern classifiers: methods and algorithms. John Wiley & Sons, 2004.
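The Q value worked out above is straightforward to reproduce from the G_i and L_j counts in the occurrence table; the sketch below is illustrative only and assumes scipy is available for the chi-squared tail probability:
import numpy as np
from scipy.stats import chi2

L = 3
G = np.array([84, 92, 92])  # correct classifications per model, from the table
T = G.sum()                 # total number of correct votes (268)
# number of correct classifiers per test object, grouped by the occurrence counts
L_j = np.repeat([3, 2, 1, 0], [80, 11, 6, 3])
Q = (L - 1) * (L * np.sum(G**2) - T**2) / (L * T - np.sum(L_j**2))
print('Q: %.4f' % Q)                        # ~7.5294
print('p-value: %.4f' % chi2.sf(Q, L - 1))  # ~0.023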
Example 1 - Cochran's Q test import numpy as np from mlxtend.evaluate import cochrans_q from mlxtend.evaluate import mcnemar_table from mlxtend.evaluate import mcnemar ## Dataset: # ground truth labels of the test dataset: y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # predictions by 3 classifiers (`y_model_1`, `y_model_2`, and `y_model_3`): y_model_1 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_2 = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_3 = np.array([1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]) Assuming a significance level \\alpha=0.05 , we can conduct Cochran's Q test as follows, to test the null hypothesis that there is no difference between the classification accuracies, p_i: H_0 = p_1 = p_2 = \\cdots = p_L : q, p_value = cochrans_q(y_true, y_model_1, y_model_2, y_model_3) print('Q: %.3f' % q) print('p-value: %.3f' % p_value) Q: 7.529 p-value: 0.023 Since the p-value is smaller than \\alpha , we can reject the null hypothesis and conclude that there is a difference between the classification accuracies. As mentioned in the introduction earlier, we could now perform multiple post hoc pair-wise tests -- for example, McNemar tests with a Bonferroni correction -- to determine which pairs have different population proportions. Lastly, let's illustrate that Cochran's Q test is indeed just a generalized version of McNemar's test: chi2, p_value = cochrans_q(y_true, y_model_1, y_model_2) print('Cochran\\'s Q Chi^2: %.3f' % chi2) print('Cochran\\'s Q p-value: %.3f' % p_value) Cochran's Q Chi^2: 5.333 Cochran's Q p-value: 0.021 chi2, p_value = mcnemar(mcnemar_table(y_true, y_model_1, y_model_2), corrected=False) print('McNemar\\'s Chi^2: %.3f' % chi2) print('McNemar\\'s p-value: %.3f' % p_value) McNemar's Chi^2: 5.333 McNemar's p-value: 0.021 API cochrans_q(y_target, *y_model_predictions) Cochran's Q test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array. Returns q, p : float or None, float Returns the Q (chi-squared) value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/cochrans_q/","title":"Cochran's Q Test"},{"location":"user_guide/evaluate/cochrans_q/#cochrans-q-test","text":"Cochran's Q test for comparing the performance of multiple classifiers.
from mlxtend.evaluate import cochrans_q","title":"Cochran's Q Test"},{"location":"user_guide/evaluate/cochrans_q/#overview","text":"Cochran's Q test can be regarded as a generalized version of McNemar's test that can be applied to evaluate multiple classifiers. In a sense, Cochran's Q test is analogous to ANOVA for binary outcomes. To compare more than two classifiers, we can use Cochran's Q test, which has a test statistic Q that is approximately (similar to McNemar's test) distributed as chi-squared with L-1 degrees of freedom, where L is the number of models we evaluate (since L=2 for McNemar's test, McNemar's test statistic approximates a chi-squared distribution with one degree of freedom). More formally, Cochran's Q test tests the hypothesis that there is no difference between the classification accuracies [1]: p_i: H_0 = p_1 = p_2 = \\cdots = p_L. Let \\{D_1, \\dots , D_L\\} be a set of classifiers that have all been tested on the same dataset. If the L classifiers don't perform differently, then the following Q statistic is distributed approximately as \"chi-squared\" with L-1 degrees of freedom: Q_C = (L-1) \\frac{L \\sum^{L}_{i=1}G_{i}^{2} - T^2}{LT - \\sum^{N_{ts}}_{j=1} (L_j)^2}. Here, G_i is the number of objects out of N_{ts} correctly classified by D_i , i = 1, \\dots, L ; L_j is the number of classifiers out of L that correctly classified object \\mathbf{z}_j \\in \\mathbf{Z}_{ts} , where \\mathbf{Z}_{ts} = \\{\\mathbf{z}_1, ... \\mathbf{z}_{N_{ts}}\\} is the test dataset on which the classifiers are tested; and T is the total number of correct votes among the L classifiers [2]: T = \\sum_{i=1}^{L} G_i = \\sum^{N_{ts}}_{j=1} L_j. To perform Cochran's Q test, we typically organize the classifier predictions in a binary N_{ts} \\times L matrix. The ij\\text{th} entry of such a matrix is 0 if a classifier D_j has misclassified a data example (vector) \\mathbf{z}_i and 1 otherwise (if the classifier predicted the class label l(\\mathbf{z}_i) correctly) [2]. The following example taken from [2] illustrates how the classification results may be organized.
For instance, assume we have the ground truth labels of the test dataset y_true and the following predictions by 3 classifiers ( y_model_1 , y_model_2 , and y_model_3 ): y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_1 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_2 = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_3 = np.array([1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]) The table of correct (1) and incorrect (0) classifications may then look as follows: D_1 (model 1) D_2 (model 2) D_3 (model 3) Occurrences 1 1 1 80 1 1 0 2 1 0 1 0 1 0 0 2 0 1 1 9 0 1 0 1 0 0 1 3 0 0 0 3 Accuracy 84/100*100% = 84% 92/100*100% = 92% 92/100*100% = 92% By plugging in the respective values into the previous equation, we obtain the following Q value [2]: Q_c = 2 \\times \\frac{3 \\times (84^2 + 92^2 + 92^2) - 268^2}{3\\times 268-(80 \\times 9 + 11 \\times 4 + 6 \\times 1)} \\approx 7.5294. (Note that the Q value in [2] is listed as 3.7647 due to a typo; as discussed with the author, the value 7.5294 is the correct one.) Now, the Q value (approximating \\chi^2 ) corresponds to a p-value of approx. 0.023 assuming a \\chi^2 distribution with L-1 = 2 degrees of freedom. Assuming that we chose a significance level of \\alpha=0.05 , we would reject the null hypothesis that all classifiers perform equally well, since 0.023 < \\alpha . In practice, if we successfully rejected the null hypothesis, we could perform multiple post hoc pair-wise tests -- for example, McNemar tests with a Bonferroni correction -- to determine which pairs have different population proportions.","title":"Overview"},{"location":"user_guide/evaluate/cochrans_q/#references","text":"[1] Fleiss, Joseph L., Bruce Levin, and Myunghee Cho Paik. Statistical methods for rates and proportions. John Wiley & Sons, 2013. [2] Kuncheva, Ludmila I. Combining pattern classifiers: methods and algorithms.
John Wiley & Sons, 2004.","title":"References"},{"location":"user_guide/evaluate/cochrans_q/#example-1-cochrans-q-test","text":"import numpy as np from mlxtend.evaluate import cochrans_q from mlxtend.evaluate import mcnemar_table from mlxtend.evaluate import mcnemar ## Dataset: # ground truth labels of the test dataset: y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # predictions by 3 classifiers (`y_model_1`, `y_model_2`, and `y_model_3`): y_model_1 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_2 = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_3 = np.array([1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]) Assuming a significance level \\alpha=0.05 , we can conduct Cochran's Q test as follows, to test the null hypothesis that there is no difference between the classification accuracies, p_i: H_0 = p_1 = p_2 = \\cdots = p_L : q, p_value = cochrans_q(y_true, y_model_1, y_model_2, y_model_3) print('Q: %.3f' % q) print('p-value: %.3f' % p_value) Q: 7.529 p-value: 0.023 Since the p-value is smaller than \\alpha , we can reject the null hypothesis and conclude that there is a difference between the classification accuracies. As mentioned in the introduction earlier, we could now perform multiple post hoc pair-wise tests -- for example, McNemar tests with a Bonferroni correction -- to determine which pairs have different population proportions. Lastly, let's illustrate that Cochran's Q test is indeed just a generalized version of McNemar's test: chi2, p_value = cochrans_q(y_true, y_model_1, y_model_2) print('Cochran\\'s Q Chi^2: %.3f' % chi2) print('Cochran\\'s Q p-value: %.3f' % p_value) Cochran's Q Chi^2: 5.333 Cochran's Q p-value: 0.021 chi2, p_value = mcnemar(mcnemar_table(y_true, y_model_1, y_model_2), corrected=False) print('McNemar\\'s Chi^2: %.3f' % chi2) print('McNemar\\'s p-value: %.3f' % p_value) McNemar's Chi^2: 5.333 McNemar's p-value: 0.021","title":"Example 1 - Cochran's Q test"},{"location":"user_guide/evaluate/cochrans_q/#api","text":"cochrans_q(y_target, *y_model_predictions) Cochran's Q test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array.
Returns q, p : float or None, float Returns the Q (chi-squared) value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/cochrans_q/","title":"API"},{"location":"user_guide/evaluate/combined_ftest_5x2cv/","text":"5x2cv combined F test 5x2cv combined F test procedure to compare the performance of two models from mlxtend.evaluate import combined_ftest_5x2cv Overview The 5x2cv combined F test is a procedure for comparing the performance of two models (classifiers or regressors) that was proposed by Alpaydin [1] as a more robust alternative to Dietterich's 5x2cv paired t-test procedure [2] (see paired_ttest_5x2cv ). Dietterich's 5x2cv method was in turn designed to address shortcomings in other methods such as the resampled paired t test (see paired_ttest_resampled ) and the k-fold cross-validated paired t test (see paired_ttest_kfold_cv ). To explain how this method works, let's consider two estimators (e.g., classifiers) A and B. Further, we have a labeled dataset D . In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the 5x2cv paired t test, we repeat the splitting (50% training and 50% test data) 5 times. In each of the 5 iterations, we fit A and B to the training split and evaluate their performance ( p_A and p_B ) on the test split. Then, we rotate the training and test sets (the training set becomes the test set and vice versa) and compute the performance again, which results in 2 performance difference measures: p^{(1)} = p^{(1)}_A - p^{(1)}_B and p^{(2)} = p^{(2)}_A - p^{(2)}_B. Then, we estimate the mean and variance of the differences: \\overline{p} = \\frac{p^{(1)} + p^{(2)}}{2} and s^2 = (p^{(1)} - \\overline{p})^2 + (p^{(2)} - \\overline{p})^2. The F-statistic proposed by Alpaydin (see paper for justifications) is then computed as \\mathcal{f} = \\frac{\\sum_{i=1}^{5} \\sum_{j=1}^2 (p_i^{(j)})^2}{2 \\sum_{i=1}^5 s_i^2}, which is approximately F distributed with 10 and 5 degrees of freedom. Using the f statistic, the p value can be computed and compared with a previously chosen significance level, e.g., \\alpha=0.05 . If the p value is smaller than \\alpha , we reject the null hypothesis and accept that there is a significant difference in the two models. References [1] Alpaydin, E. (1999). Combined 5\u00d72 cv F test for comparing supervised classification learning algorithms. Neural computation, 11(8), 1885-1892. [2] Dietterich TG (1998) Approximate Statistical Tests for Comparing Supervised Classification Learning Algorithms. Neural Comput 10:1895\u20131923.
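For intuition, the combined F statistic can be computed by hand from the ten score differences; the sketch below uses randomly generated placeholder differences p_diff (not real model scores) and follows the equations above, assuming scipy is available:
import numpy as np
from scipy.stats import f as f_dist

# p_diff[i, j] stands for p_i^{(j)}: the score difference between the two
# models in repetition i (of 5) and fold j (of 2); placeholder values here
p_diff = np.random.RandomState(0).normal(loc=0.0, scale=0.02, size=(5, 2))

p_bar = p_diff.mean(axis=1)                        # per-repetition mean difference
s2 = ((p_diff - p_bar[:, None]) ** 2).sum(axis=1)  # per-repetition variance estimate
f_stat = (p_diff ** 2).sum() / (2 * s2.sum())
print('F statistic: %.3f' % f_stat)
print('p value: %.3f' % f_dist.sf(f_stat, 10, 5))  # upper tail of F(10, 5)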
Example 1 - 5x2cv combined F test Assume we want to compare two classification algorithms, logistic regression and a decision tree algorithm: from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from mlxtend.data import iris_data from sklearn.model_selection import train_test_split X, y = iris_data() clf1 = LogisticRegression(random_state=1, solver='liblinear', multi_class='ovr') clf2 = DecisionTreeClassifier(random_state=1) X_train, X_test, y_train, y_test = \\ train_test_split(X, y, test_size=0.25, random_state=123) score1 = clf1.fit(X_train, y_train).score(X_test, y_test) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Logistic regression accuracy: %.2f%%' % (score1*100)) print('Decision tree accuracy: %.2f%%' % (score2*100)) Logistic regression accuracy: 97.37% Decision tree accuracy: 94.74% Note that these accuracy values are not used in the paired f test procedure, as new test/train splits are generated during the resampling procedure; the values above merely serve to provide intuition. Now, let's assume a significance threshold of \\alpha=0.05 for rejecting the null hypothesis that both algorithms perform equally well on the dataset and conduct the 5x2cv f test: from mlxtend.evaluate import combined_ftest_5x2cv f, p = combined_ftest_5x2cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('F statistic: %.3f' % f) print('p value: %.3f' % p) F statistic: 1.053 p value: 0.509 Since p > \\alpha , we cannot reject the null hypothesis and may conclude that the performance of the two algorithms is not significantly different. While it is generally not recommended to apply statistical tests multiple times without correction for multiple hypothesis testing, let us take a look at an example where the decision tree algorithm is limited to producing a very simple decision boundary that would result in a relatively bad performance: clf2 = DecisionTreeClassifier(random_state=1, max_depth=1) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Decision tree accuracy: %.2f%%' % (score2*100)) f, p = combined_ftest_5x2cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('F statistic: %.3f' % f) print('p value: %.3f' % p) Decision tree accuracy: 63.16% F statistic: 34.934 p value: 0.001 Assuming that we conducted this test also with a significance level of \\alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p < 0.001 ) is smaller than \\alpha . API combined_ftest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv combined F test proposed by Alpaydin 1999, to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors.
If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns f : float The F-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/combined_ftest_5x2cv/","title":"5x2cv combined *F* test"},{"location":"user_guide/evaluate/combined_ftest_5x2cv/#5x2cv-combined-f-test","text":"5x2cv combined F test procedure to compare the performance of two models from mlxtend.evaluate import combined_ftest_5x2cv","title":"5x2cv combined F test"},{"location":"user_guide/evaluate/combined_ftest_5x2cv/#overview","text":"The 5x2cv combined F test is a procedure for comparing the performance of two models (classifiers or regressors) that was proposed by Alpaydin [1] as a more robust alternative to Dietterich's 5x2cv paired t-test procedure [2] (see paired_ttest_5x2cv ). Dietterich's 5x2cv method was in turn designed to address shortcomings in other methods such as the resampled paired t test (see paired_ttest_resampled ) and the k-fold cross-validated paired t test (see paired_ttest_kfold_cv ). To explain how this method works, let's consider two estimators (e.g., classifiers) A and B. Further, we have a labeled dataset D . In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the 5x2cv paired t test, we repeat the splitting (50% training and 50% test data) 5 times. In each of the 5 iterations, we fit A and B to the training split and evaluate their performance ( p_A and p_B ) on the test split. Then, we rotate the training and test sets (the training set becomes the test set and vice versa) and compute the performance again, which results in 2 performance difference measures: p^{(1)} = p^{(1)}_A - p^{(1)}_B and p^{(2)} = p^{(2)}_A - p^{(2)}_B. Then, we estimate the mean and variance of the differences: \\overline{p} = \\frac{p^{(1)} + p^{(2)}}{2} and s^2 = (p^{(1)} - \\overline{p})^2 + (p^{(2)} - \\overline{p})^2. The F-statistic proposed by Alpaydin (see paper for justifications) is then computed as \\mathcal{f} = \\frac{\\sum_{i=1}^{5} \\sum_{j=1}^2 (p_i^{(j)})^2}{2 \\sum_{i=1}^5 s_i^2}, which is approximately F distributed with 10 and 5 degrees of freedom. Using the f statistic, the p value can be computed and compared with a previously chosen significance level, e.g., \\alpha=0.05 . If the p value is smaller than \\alpha , we reject the null hypothesis and accept that there is a significant difference in the two models.","title":"Overview"},{"location":"user_guide/evaluate/combined_ftest_5x2cv/#references","text":"[1] Alpaydin, E. (1999). Combined 5\u00d72 cv F test for comparing supervised classification learning algorithms. Neural computation, 11(8), 1885-1892. [2] Dietterich TG (1998) Approximate Statistical Tests for Comparing Supervised Classification Learning Algorithms.
Neural Comput 10:1895\u20131923.","title":"References"},{"location":"user_guide/evaluate/combined_ftest_5x2cv/#example-1-5x2cv-combined-f-test","text":"Assume we want to compare two classification algorithms, logistic regression and a decision tree algorithm: from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from mlxtend.data import iris_data from sklearn.model_selection import train_test_split X, y = iris_data() clf1 = LogisticRegression(random_state=1, solver='liblinear', multi_class='ovr') clf2 = DecisionTreeClassifier(random_state=1) X_train, X_test, y_train, y_test = \\ train_test_split(X, y, test_size=0.25, random_state=123) score1 = clf1.fit(X_train, y_train).score(X_test, y_test) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Logistic regression accuracy: %.2f%%' % (score1*100)) print('Decision tree accuracy: %.2f%%' % (score2*100)) Logistic regression accuracy: 97.37% Decision tree accuracy: 94.74% Note that these accuracy values are not used in the paired F test procedure, since new train/test splits are generated during the resampling procedure; the values above merely serve to build intuition. Now, let's assume a significance threshold of \\alpha=0.05 for rejecting the null hypothesis that both algorithms perform equally well on the dataset and conduct the 5x2cv F test: from mlxtend.evaluate import combined_ftest_5x2cv f, p = combined_ftest_5x2cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('F statistic: %.3f' % f) print('p value: %.3f' % p) F statistic: 1.053 p value: 0.509 Since p > \\alpha , we cannot reject the null hypothesis and may conclude that the performance of the two algorithms is not significantly different. While it is generally not recommended to apply statistical tests multiple times without correction for multiple hypothesis testing, let us take a look at an example where the decision tree algorithm is limited to producing a very simple decision boundary that would result in a relatively bad performance: clf2 = DecisionTreeClassifier(random_state=1, max_depth=1) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Decision tree accuracy: %.2f%%' % (score2*100)) f, p = combined_ftest_5x2cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('F statistic: %.3f' % f) print('p value: %.3f' % p) Decision tree accuracy: 63.16% F statistic: 34.934 p value: 0.001 Assuming that we also conducted this test with a significance level of \\alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p \\approx 0.001 ) is smaller than \\alpha .","title":"Example 1 - 5x2cv combined F test"},{"location":"user_guide/evaluate/combined_ftest_5x2cv/#api","text":"combined_ftest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv combined F test proposed by Alpaydin (1999) to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. 
If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns f : float The F-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/combined_ftest_5x2cv/","title":"API"},{"location":"user_guide/evaluate/confusion_matrix/","text":"Confusion Matrix Functions for generating confusion matrices. from mlxtend.evaluate import confusion_matrix from mlxtend.plotting import plot_confusion_matrix Overview Confusion Matrix The confusion matrix (or error matrix ) is one way to summarize the performance of a classifier for binary classification tasks. This square matrix consists of columns and rows that list the number of instances as absolute or relative \"actual class\" vs. \"predicted class\" ratios. Let P be the label of class 1 and N be the label of a second class or the label of all classes that are not class 1 in a multi-class setting. References - Example 1 - Binary classification from mlxtend.evaluate import confusion_matrix y_target = [0, 0, 1, 0, 0, 1, 1, 1] y_predicted = [1, 0, 1, 0, 0, 0, 0, 1] cm = confusion_matrix(y_target=y_target, y_predicted=y_predicted) cm array([[3, 1], [2, 2]]) To visualize the confusion matrix using matplotlib, see the utility function mlxtend.plotting.plot_confusion_matrix : import matplotlib.pyplot as plt from mlxtend.plotting import plot_confusion_matrix fig, ax = plot_confusion_matrix(conf_mat=cm) plt.show() Example 2 - Multi-class classification from mlxtend.evaluate import confusion_matrix y_target = [1, 1, 1, 0, 0, 2, 0, 3] y_predicted = [1, 0, 1, 0, 0, 2, 1, 3] cm = confusion_matrix(y_target=y_target, y_predicted=y_predicted, binary=False) cm array([[2, 1, 0, 0], [1, 2, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) To visualize the confusion matrix using matplotlib, see the utility function mlxtend.plotting.plot_confusion_matrix : import matplotlib.pyplot as plt from mlxtend.plotting import plot_confusion_matrix fig, ax = plot_confusion_matrix(conf_mat=cm) plt.show() Example 3 - Multi-class to binary By setting binary=True , all class labels that are not the positive class label are summarized as class 0. The positive class label becomes class 1. import matplotlib.pyplot as plt from mlxtend.evaluate import confusion_matrix y_target = [1, 1, 1, 0, 0, 2, 0, 3] y_predicted = [1, 0, 1, 0, 0, 2, 1, 3] cm = confusion_matrix(y_target=y_target, y_predicted=y_predicted, binary=True, positive_label=1) cm array([[4, 1], [1, 2]]) To visualize the confusion matrix using matplotlib, see the utility function mlxtend.plotting.plot_confusion_matrix : from mlxtend.plotting import plot_confusion_matrix fig, ax = plot_confusion_matrix(conf_mat=cm) plt.show() API confusion_matrix(y_target, y_predicted, binary=False, positive_label=1) Compute a confusion matrix/contingency table. 
Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: False) Maps a multi-class problem onto a binary confusion matrix, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns mat : array-like, shape=[n_classes, n_classes] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/confusion_matrix/","title":"Confusion Matrix"},{"location":"user_guide/evaluate/confusion_matrix/#confusion-matrix","text":"Functions for generating confusion matrices. from mlxtend.evaluate import confusion_matrix from mlxtend.plotting import plot_confusion_matrix","title":"Confusion Matrix"},{"location":"user_guide/evaluate/confusion_matrix/#overview","text":"","title":"Overview"},{"location":"user_guide/evaluate/confusion_matrix/#confusion-matrix_1","text":"The confusion matrix (or error matrix ) is one way to summarize the performance of a classifier for binary classification tasks. This square matrix consists of columns and rows that list the number of instances as absolute or relative \"actual class\" vs. \"predicted class\" ratios. Let P be the label of class 1 and N be the label of a second class or the label of all classes that are not class 1 in a multi-class setting.","title":"Confusion Matrix"},{"location":"user_guide/evaluate/confusion_matrix/#references","text":"-","title":"References"},{"location":"user_guide/evaluate/confusion_matrix/#example-1-binary-classification","text":"from mlxtend.evaluate import confusion_matrix y_target = [0, 0, 1, 0, 0, 1, 1, 1] y_predicted = [1, 0, 1, 0, 0, 0, 0, 1] cm = confusion_matrix(y_target=y_target, y_predicted=y_predicted) cm array([[3, 1], [2, 2]]) To visualize the confusion matrix using matplotlib, see the utility function mlxtend.plotting.plot_confusion_matrix : import matplotlib.pyplot as plt from mlxtend.plotting import plot_confusion_matrix fig, ax = plot_confusion_matrix(conf_mat=cm) plt.show()","title":"Example 1 - Binary classification"},{"location":"user_guide/evaluate/confusion_matrix/#example-2-multi-class-classification","text":"from mlxtend.evaluate import confusion_matrix y_target = [1, 1, 1, 0, 0, 2, 0, 3] y_predicted = [1, 0, 1, 0, 0, 2, 1, 3] cm = confusion_matrix(y_target=y_target, y_predicted=y_predicted, binary=False) cm array([[2, 1, 0, 0], [1, 2, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) To visualize the confusion matrix using matplotlib, see the utility function mlxtend.plotting.plot_confusion_matrix : import matplotlib.pyplot as plt from mlxtend.plotting import plot_confusion_matrix fig, ax = plot_confusion_matrix(conf_mat=cm) plt.show()","title":"Example 2 - Multi-class classification"},{"location":"user_guide/evaluate/confusion_matrix/#example-3-multi-class-to-binary","text":"By setting binary=True , all class labels that are not the positive class label are summarized as class 0. The positive class label becomes class 1. 
import matplotlib.pyplot as plt from mlxtend.evaluate import confusion_matrix y_target = [1, 1, 1, 0, 0, 2, 0, 3] y_predicted = [1, 0, 1, 0, 0, 2, 1, 3] cm = confusion_matrix(y_target=y_target, y_predicted=y_predicted, binary=True, positive_label=1) cm array([[4, 1], [1, 2]]) To visualize the confusion matrix using matplotlib, see the utility function mlxtend.plotting.plot_confusion_matrix : from mlxtend.plotting import plot_confusion_matrix fig, ax = plot_confusion_matrix(conf_mat=cm) plt.show()","title":"Example 3 - Multi-class to binary"},{"location":"user_guide/evaluate/confusion_matrix/#api","text":"confusion_matrix(y_target, y_predicted, binary=False, positive_label=1) Compute a confusion matrix/contingency table. Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: False) Maps a multi-class problem onto a binary confusion matrix, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns mat : array-like, shape=[n_classes, n_classes] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/confusion_matrix/","title":"API"},{"location":"user_guide/evaluate/feature_importance_permutation/","text":"Feature Importance Permutation A function to estimate the feature importance of classifiers and regressors based on permutation importance . from mlxtend.evaluate import feature_importance_permutation Overview The permutation importance is an intuitive, model-agnostic method to estimate the feature importance for classification and regression models. The approach is relatively simple and straightforward: Take a model that was fit to the training dataset Estimate the predictive performance of the model on an independent dataset (e.g., validation dataset) and record it as the baseline performance For each feature i : randomly permute feature column i in the original dataset record the predictive performance of the model on the dataset with the permuted column compute the feature importance as the difference between the baseline performance (step 2) and the performance on the permuted dataset Permutation importance is generally considered a relatively efficient technique that works well in practice [1], while a drawback is that the importance of correlated features may be overestimated [2]. References [1] Terence Parr, Kerem Turgutlu, Christopher Csiszar, and Jeremy Howard. Beware Default Random Forest Importances (http://parrt.cs.usfca.edu/doc/rf-importance/index.html) [2] Strobl, C., Boulesteix, A. L., Kneib, T., Augustin, T., & Zeileis, A. (2008). Conditional variable importance for random forests. BMC bioinformatics, 9(1), 307. Example 1 -- Feature Importance for Classifiers The following example illustrates the feature importance estimation via permutation importance for classification models. 
import numpy as np import matplotlib.pyplot as plt from sklearn.svm import SVC from sklearn.model_selection import train_test_split from mlxtend.evaluate import feature_importance_permutation Generate a toy dataset from sklearn.datasets import make_classification from sklearn.ensemble import RandomForestClassifier # Build a classification task using 3 informative features X, y = make_classification(n_samples=10000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, n_classes=2, random_state=0, shuffle=False) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=1, stratify=y) Feature importance via random forest First, we compute the feature importance directly from the random forest via mean impurity decrease (described after the code section): forest = RandomForestClassifier(n_estimators=250, random_state=0) forest.fit(X_train, y_train) print('Training accuracy:', np.mean(forest.predict(X_train) == y_train)*100) print('Test accuracy:', np.mean(forest.predict(X_test) == y_test)*100) importance_vals = forest.feature_importances_ print(importance_vals) Training accuracy: 100.0 Test accuracy: 95.0666666667 [ 0.283357 0.30846795 0.24204291 0.02229767 0.02364941 0.02390578 0.02501543 0.0234225 0.02370816 0.0241332 ] There are several strategies for computing the feature importance in random forests. The method implemented in scikit-learn (used in the next code example) is based on Breiman and Friedman's CART (Breiman, Friedman, \"Classification and regression trees\", 1984), the so-called mean impurity decrease . Here, the importance value of a feature is computed by averaging the impurity decrease for that feature, when splitting a parent node into two child nodes, across all the trees in the ensemble. Note that the impurity decrease values are weighted by the number of samples that are in the respective nodes. This process is repeated for all features in the dataset, and the feature importance values are then normalized so that they sum up to 1. In CART, the authors also note that this fast way of computing feature importance values is relatively consistent with the permutation importance. Next, let's visualize the feature importance values from the random forest including a measure of the mean impurity decrease variability (here: standard deviation): std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0) indices = np.argsort(importance_vals)[::-1] # Plot the feature importances of the forest plt.figure() plt.title(\"Random Forest feature importance\") plt.bar(range(X.shape[1]), importance_vals[indices], yerr=std[indices], align=\"center\") plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.ylim([0, 0.5]) plt.show() As we can see, the features 1, 0, and 2 are estimated to be the most informative ones for the random forest classifier. Next, let's compute the feature importance via the permutation importance approach. Permutation Importance imp_vals, _ = feature_importance_permutation( predict_method=forest.predict, X=X_test, y=y_test, metric='accuracy', num_rounds=1, seed=1) imp_vals array([ 0.26833333, 0.26733333, 0.261 , -0.002 , -0.00033333, 0.00066667, 0.00233333, 0.00066667, 0.00066667, -0.00233333]) Note that the feature_importance_permutation returns two arrays. The first array (here: imp_vals ) contains the actual importance values we are interested in. 
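As a quick illustration of the two return values, here is a minimal sketch (not part of the original example; it assumes the forest and test split from the code above, and the shapes follow the API section further below):

imp_vals, imp_all = feature_importance_permutation(
    predict_method=forest.predict,
    X=X_test,
    y=y_test,
    metric='accuracy',
    num_rounds=5,
    seed=1)

# mean importance per feature, shape [n_features,]
print(imp_vals.shape)

# one importance value per feature and permutation round,
# shape [n_features, num_rounds]
print(imp_all.shape)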
If num_rounds > 1 , the permutation is repeated multiple times (with different random seeds), and in this case the first array contains the average value of the importance computed from the different runs. The second array (here, assigned to _ , because we are not using it) then contains all individual values from these runs (more about that later). Now, let's also visualize the importance values in a barplot: indices = np.argsort(imp_vals)[::-1] plt.figure() plt.title(\"Random Forest feature importance via permutation importance\") plt.bar(range(X.shape[1]), imp_vals[indices]) plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.ylim([0, 0.5]) plt.show() As we can see, here too, features 1, 0, and 2 are predicted to be the most important ones, which is consistent with the feature importance values that we computed via the mean impurity decrease method earlier. (Note that in the context of random forests, the feature importance via permutation importance is typically computed using the out-of-bag samples of a random forest, whereas in this implementation, an independent dataset is used.) Previously, it was mentioned that the permutation is repeated multiple times if num_rounds > 1 . In this case, the second array returned by the feature_importance_permutation contains the importance values for these individual runs (the array has shape [num_features, num_rounds]), which we can use to compute the variability of the importance values across these runs. imp_vals, imp_all = feature_importance_permutation( predict_method=forest.predict, X=X_test, y=y_test, metric='accuracy', num_rounds=10, seed=1) std = np.std(imp_all, axis=1) indices = np.argsort(imp_vals)[::-1] plt.figure() plt.title(\"Random Forest feature importance via permutation importance w. std. dev.\") plt.bar(range(X.shape[1]), imp_vals[indices], yerr=std[indices]) plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.show() Note that the feature importance values do not sum to one, since they are not normalized (you can normalize them, if you like, by dividing them by the sum of the importance values). Here, the main point is to look at the importance values relative to each other and not to over-interpret the absolute values. Support Vector Machines While the permutation importance approach yields results that are generally consistent with the mean impurity decrease feature importance values from a random forest, it's a method that is model-agnostic and can be used with any kind of classifier or regressor. 
The example below applies the feature_importance_permutation function to a support vector machine: from sklearn.svm import SVC svm = SVC(C=1.0, kernel='rbf') svm.fit(X_train, y_train) print('Training accuracy', np.mean(svm.predict(X_train) == y_train)*100) print('Test accuracy', np.mean(svm.predict(X_test) == y_test)*100) Training accuracy 95.0857142857 Test accuracy 94.9666666667 imp_vals, imp_all = feature_importance_permutation( predict_method=svm.predict, X=X_test, y=y_test, metric='accuracy', num_rounds=10, seed=1) std = np.std(imp_all, axis=1) indices = np.argsort(imp_vals)[::-1] plt.figure() plt.title(\"SVM feature importance via permutation importance\") plt.bar(range(X.shape[1]), imp_vals[indices], yerr=std[indices]) plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.show() Example 2 -- Feature Importance for Regressors import numpy as np import matplotlib.pyplot as plt from mlxtend.evaluate import feature_importance_permutation from sklearn.model_selection import train_test_split from sklearn.datasets import make_regression from sklearn.svm import SVR X, y = make_regression(n_samples=1000, n_features=5, n_informative=2, n_targets=1, random_state=123, shuffle=False) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=123) svm = SVR(kernel='rbf') svm.fit(X_train, y_train) imp_vals, _ = feature_importance_permutation( predict_method=svm.predict, X=X_test, y=y_test, metric='r2', num_rounds=1, seed=1) imp_vals array([ 0.43676245, 0.22231268, 0.00146906, 0.01611528, -0.00522067]) plt.figure() plt.bar(range(X.shape[1]), imp_vals) plt.xticks(range(X.shape[1])) plt.xlim([-1, X.shape[1]]) plt.ylim([0, 0.5]) plt.show() API feature_importance_permutation(X, y, predict_method, metric, num_rounds=1, seed=None) Feature importance estimation via permutation importance Parameters X : NumPy array, shape = [n_samples, n_features] Dataset, where n_samples is the number of samples and n_features is the number of features. y : NumPy array, shape = [n_samples] Target values. predict_method : prediction function A callable function that predicts the target values from X. metric : str, callable The metric for evaluating the feature importance through permutation. By default, the string 'accuracy' is recommended for classifiers and the string 'r2' is recommended for regressors. Optionally, a custom scoring function (e.g., metric=scoring_func ) can be used that accepts two arguments, y_true and y_pred, which have a similar shape to the y array. num_rounds : int (default=1) Number of rounds the feature columns are permuted to compute the permutation importance. seed : int or None (default=None) Random seed for permuting the feature columns. Returns mean_importance_vals, all_importance_vals : NumPy arrays. The first array, mean_importance_vals has shape [n_features, ] and contains the importance values for all features. The shape of the second array is [n_features, num_rounds] and contains the feature importance for each repetition. If num_rounds=1, it contains the same values as the first array, mean_importance_vals. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/feature_importance_permutation/","title":"Feature Importance Permutation"},{"location":"user_guide/evaluate/feature_importance_permutation/#feature-importance-permutation","text":"A function to estimate the feature importance of classifiers and regressors based on permutation importance . 
from mlxtend.evaluate import feature_importance_permutation","title":"Feature Importance Permutation"},{"location":"user_guide/evaluate/feature_importance_permutation/#overview","text":"The permutation importance is an intuitive, model-agnostic method to estimate the feature importance for classifier and regression models. The approach is relatively simple and straight-forward: Take a model that was fit to the training dataset Estimate the predictive performance of the model on an independent dataset (e.g., validation dataset) and record it as the baseline performance For each feature i : randomly permute feature column i in the original dataset record the predictive performance of the model on the dataset with the permuted column compute the feature importance as the difference between the baseline performance (step 2) and the performance on the permuted dataset Permutation importance is generally considered as a relatively efficient technique that works well in practice [1], while a drawback is that the importance of correlated features may be overestimated [2].","title":"Overview"},{"location":"user_guide/evaluate/feature_importance_permutation/#references","text":"[1] Terence Parr, Kerem Turgutlu, Christopher Csiszar, and Jeremy Howard. Beware Default Random Forest Importances (http://parrt.cs.usfca.edu/doc/rf-importance/index.html) [2] Strobl, C., Boulesteix, A. L., Kneib, T., Augustin, T., & Zeileis, A. (2008). Conditional variable importance for random forests. BMC bioinformatics, 9(1), 307.","title":"References"},{"location":"user_guide/evaluate/feature_importance_permutation/#example-1-feature-importance-for-classifiers","text":"The following example illustrates the feature importance estimation via permutation importance based for classification models. import numpy as np import matplotlib.pyplot as plt from sklearn.svm import SVC from sklearn.model_selection import train_test_split from mlxtend.evaluate import feature_importance_permutation","title":"Example 1 -- Feature Importance for Classifiers"},{"location":"user_guide/evaluate/feature_importance_permutation/#generate-a-toy-dataset","text":"from sklearn.datasets import make_classification from sklearn.ensemble import RandomForestClassifier # Build a classification task using 3 informative features X, y = make_classification(n_samples=10000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, n_classes=2, random_state=0, shuffle=False) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=1, stratify=y)","title":"Generate a toy dataset"},{"location":"user_guide/evaluate/feature_importance_permutation/#feature-importance-via-random-forest","text":"First, we compute the feature importance directly from the random forest via mean impurity decrease (described after the code section): forest = RandomForestClassifier(n_estimators=250, random_state=0) forest.fit(X_train, y_train) print('Training accuracy:', np.mean(forest.predict(X_train) == y_train)*100) print('Test accuracy:', np.mean(forest.predict(X_test) == y_test)*100) importance_vals = forest.feature_importances_ print(importance_vals) Training accuracy: 100.0 Test accuracy: 95.0666666667 [ 0.283357 0.30846795 0.24204291 0.02229767 0.02364941 0.02390578 0.02501543 0.0234225 0.02370816 0.0241332 ] There are several strategies for computing the feature importance in random forest. 
The method implemented in scikit-learn (used in the next code example) is based on Breiman and Friedman's CART (Breiman, Friedman, \"Classification and regression trees\", 1984), the so-called mean impurity decrease . Here, the importance value of a feature is computed by averaging the impurity decrease for that feature, when splitting a parent node into two child nodes, across all the trees in the ensemble. Note that the impurity decrease values are weighted by the number of samples that are in the respective nodes. This process is repeated for all features in the dataset, and the feature importance values are then normalized so that they sum up to 1. In CART, the authors also note that this fast way of computing feature importance values is relatively consistent with the permutation importance. Next, let's visualize the feature importance values from the random forest including a measure of the mean impurity decrease variability (here: standard deviation): std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0) indices = np.argsort(importance_vals)[::-1] # Plot the feature importances of the forest plt.figure() plt.title(\"Random Forest feature importance\") plt.bar(range(X.shape[1]), importance_vals[indices], yerr=std[indices], align=\"center\") plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.ylim([0, 0.5]) plt.show() As we can see, the features 1, 0, and 2 are estimated to be the most informative ones for the random forest classifier. Next, let's compute the feature importance via the permutation importance approach.","title":"Feature importance via random forest"},{"location":"user_guide/evaluate/feature_importance_permutation/#permutation-importance","text":"imp_vals, _ = feature_importance_permutation( predict_method=forest.predict, X=X_test, y=y_test, metric='accuracy', num_rounds=1, seed=1) imp_vals array([ 0.26833333, 0.26733333, 0.261 , -0.002 , -0.00033333, 0.00066667, 0.00233333, 0.00066667, 0.00066667, -0.00233333]) Note that the feature_importance_permutation returns two arrays. The first array (here: imp_vals ) contains the actual importance values we are interested in. If num_rounds > 1 , the permutation is repeated multiple times (with different random seeds), and in this case the first array contains the average value of the importance computed from the different runs. The second array (here, assigned to _ , because we are not using it) then contains all individual values from these runs (more about that later). Now, let's also visualize the importance values in a barplot: indices = np.argsort(imp_vals)[::-1] plt.figure() plt.title(\"Random Forest feature importance via permutation importance\") plt.bar(range(X.shape[1]), imp_vals[indices]) plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.ylim([0, 0.5]) plt.show() As we can see, here too, features 1, 0, and 2 are predicted to be the most important ones, which is consistent with the feature importance values that we computed via the mean impurity decrease method earlier. (Note that in the context of random forests, the feature importance via permutation importance is typically computed using the out-of-bag samples of a random forest, whereas in this implementation, an independent dataset is used.) Previously, it was mentioned that the permutation is repeated multiple times if num_rounds > 1 . 
In this case, the second array returned by the feature_importance_permutation contains the importance values for these individual runs (the array has shape [num_features, num_rounds]), which we can use to compute the variability of the importance values across these runs. imp_vals, imp_all = feature_importance_permutation( predict_method=forest.predict, X=X_test, y=y_test, metric='accuracy', num_rounds=10, seed=1) std = np.std(imp_all, axis=1) indices = np.argsort(imp_vals)[::-1] plt.figure() plt.title(\"Random Forest feature importance via permutation importance w. std. dev.\") plt.bar(range(X.shape[1]), imp_vals[indices], yerr=std[indices]) plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.show() Note that the feature importance values do not sum to one, since they are not normalized (you can normalize them, if you like, by dividing them by the sum of the importance values). Here, the main point is to look at the importance values relative to each other and not to over-interpret the absolute values.","title":"Permutation Importance"},{"location":"user_guide/evaluate/feature_importance_permutation/#support-vector-machines","text":"While the permutation importance approach yields results that are generally consistent with the mean impurity decrease feature importance values from a random forest, it's a method that is model-agnostic and can be used with any kind of classifier or regressor. The example below applies the feature_importance_permutation function to a support vector machine: from sklearn.svm import SVC svm = SVC(C=1.0, kernel='rbf') svm.fit(X_train, y_train) print('Training accuracy', np.mean(svm.predict(X_train) == y_train)*100) print('Test accuracy', np.mean(svm.predict(X_test) == y_test)*100) Training accuracy 95.0857142857 Test accuracy 94.9666666667 imp_vals, imp_all = feature_importance_permutation( predict_method=svm.predict, X=X_test, y=y_test, metric='accuracy', num_rounds=10, seed=1) std = np.std(imp_all, axis=1) indices = np.argsort(imp_vals)[::-1] plt.figure() plt.title(\"SVM feature importance via permutation importance\") plt.bar(range(X.shape[1]), imp_vals[indices], yerr=std[indices]) plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.show()","title":"Support Vector Machines"},{"location":"user_guide/evaluate/feature_importance_permutation/#example-1-feature-importance-for-regressors","text":"import numpy as np import matplotlib.pyplot as plt from mlxtend.evaluate import feature_importance_permutation from sklearn.model_selection import train_test_split from sklearn.datasets import make_regression from sklearn.svm import SVR X, y = make_regression(n_samples=1000, n_features=5, n_informative=2, n_targets=1, random_state=123, shuffle=False) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=123) svm = SVR(kernel='rbf') svm.fit(X_train, y_train) imp_vals, _ = feature_importance_permutation( predict_method=svm.predict, X=X_test, y=y_test, metric='r2', num_rounds=1, seed=1) imp_vals array([ 0.43676245, 0.22231268, 0.00146906, 0.01611528, -0.00522067]) plt.figure() plt.bar(range(X.shape[1]), imp_vals) plt.xticks(range(X.shape[1])) plt.xlim([-1, X.shape[1]]) plt.ylim([0, 0.5]) plt.show()","title":"Example 2 -- Feature Importance for Regressors"},{"location":"user_guide/evaluate/feature_importance_permutation/#api","text":"feature_importance_permutation(X, y, predict_method, metric, num_rounds=1, seed=None) Feature importance estimation via permutation importance Parameters X : NumPy array, shape = 
[n_samples, n_features] Dataset, where n_samples is the number of samples and n_features is the number of features. y : NumPy array, shape = [n_samples] Target values. predict_method : prediction function A callable function that predicts the target values from X. metric : str, callable The metric for evaluating the feature importance through permutation. By default, the string 'accuracy' is recommended for classifiers and the string 'r2' is recommended for regressors. Optionally, a custom scoring function (e.g., metric=scoring_func ) can be used that accepts two arguments, y_true and y_pred, which have a similar shape to the y array. num_rounds : int (default=1) Number of rounds the feature columns are permuted to compute the permutation importance. seed : int or None (default=None) Random seed for permuting the feature columns. Returns mean_importance_vals, all_importance_vals : NumPy arrays. The first array, mean_importance_vals has shape [n_features, ] and contains the importance values for all features. The shape of the second array is [n_features, num_rounds] and contains the feature importance for each repetition. If num_rounds=1, it contains the same values as the first array, mean_importance_vals. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/feature_importance_permutation/","title":"API"},{"location":"user_guide/evaluate/ftest/","text":"F-Test F-test for comparing the performance of multiple classifiers. from mlxtend.evaluate import ftest Overview In the context of evaluating machine learning models, the F-test by George W. Snedecor [1] can be regarded as analogous to Cochran's Q test that can be applied to evaluate multiple classifiers (i.e., whether their accuracies estimated on a test set differ) as described by Looney [2][3]. More formally, assume the task is to test the null hypothesis that there is no difference between the classification accuracies [1]: H_0: p_1 = p_2 = \\cdots = p_L. Let \\{D_1, \\dots , D_L\\} be a set of classifiers that have all been tested on the same dataset. If the L classifiers don't perform differently, then the F statistic is distributed according to an F distribution with (L-1) and (L-1)\\times N degrees of freedom, where N is the number of examples in the test set. The calculation of the F statistic consists of several components, which are listed below (adopted from [3]). Sum of squares of the classifiers: SSA = N \\sum_{i=1}^{L} ACC_i^2 - L\\cdot N \\cdot ACC_{avg}^2, where ACC_i is the accuracy of the i -th classifier and ACC_{avg} is the average of the accuracies of the different models, ACC_{avg} = \\frac{1}{L} \\sum_{i=1}^L ACC_i . The sum of squares for the objects: SSB= \\frac{1}{L} \\sum_{j=1}^N (L_j)^2 - L\\cdot N \\cdot ACC_{avg}^2, where L_j is the number of classifiers out of L that correctly classified object \\mathbf{z}_j \\in \\mathbf{Z}_{N} , and \\mathbf{Z}_{N} = \\{\\mathbf{z}_1, ... \\mathbf{z}_{N}\\} is the test dataset on which the classifiers are tested. The total sum of squares: SST = L\\cdot N \\cdot ACC_{avg} (1 - ACC_{avg}). The sum of squares for the classification--object interaction: SSAB = SST - SSA - SSB. The mean SSA and mean SSAB values: MSA = \\frac{SSA}{L-1}, and MSAB = \\frac{SSAB}{(L-1) (N-1)}. From the MSA and MSAB, we can then calculate the F-value as F = \\frac{MSA}{MSAB}. After computing the F-value, we can then look up the p-value from an F-distribution table for the corresponding degrees of freedom or obtain it computationally from a cumulative F-distribution function. 
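As a minimal sketch of this computational lookup, assuming SciPy is available (the values L=3 classifiers, N=100 test examples, and F=3.873 are taken from Example 1 below; this snippet is illustrative and not part of the mlxtend API):

from scipy import stats

L, N = 3, 100    # number of classifiers and number of test examples
f_value = 3.873  # an F-value computed as MSA / MSAB

# p-value via the survival function (1 - CDF) of the F distribution
# with (L - 1) and (L - 1) * N degrees of freedom
p_value = stats.f.sf(f_value, L - 1, (L - 1) * N)
print(p_value)  # ~0.022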
In practice, if we successfully rejected the null hypothesis at a previously chosen significance threshold, we could perform multiple post hoc pair-wise tests -- for example, McNemar tests with a Bonferroni correction -- to determine which pairs have different population proportions. References [1] Snedecor, George W. and Cochran, William G. (1989), Statistical Methods, Eighth Edition, Iowa State University Press. [2] Looney, Stephen W. \"A statistical technique for comparing the accuracies of several classifiers.\" Pattern Recognition Letters 8, no. 1 (1988): 5-9. [3] Kuncheva, Ludmila I. Combining pattern classifiers: methods and algorithms. John Wiley & Sons, 2004. Example 1 - F-test import numpy as np from mlxtend.evaluate import ftest ## Dataset: # ground truth labels of the test dataset: y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # predictions by 3 classifiers (`y_model_1`, `y_model_2`, and `y_model_3`): y_model_1 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_2 = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_3 = np.array([1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]) Assuming a significance level \\alpha=0.05 , we can conduct the F-test as follows, to test the null hypothesis that there is no difference between the classification accuracies, H_0: p_1 = p_2 = \\cdots = p_L : f, p_value = ftest(y_true, y_model_1, y_model_2, y_model_3) print('F: %.3f' % f) print('p-value: %.3f' % p_value) F: 3.873 p-value: 0.022 Since the p-value is smaller than \\alpha , we can reject the null hypothesis and conclude that there is a difference between the classification accuracies. As mentioned in the introduction earlier, we could now perform multiple post hoc pair-wise tests -- for example, McNemar tests with a Bonferroni correction -- to determine which pairs have different population proportions. API ftest(y_target, *y_model_predictions) F-test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array. Returns f, p : float or None, float Returns the F-value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/ftest/","title":"F-Test"},{"location":"user_guide/evaluate/ftest/#f-test","text":"F-test for comparing the performance of multiple classifiers. 
from mlxtend.evaluate import ftest","title":"F-Test"},{"location":"user_guide/evaluate/ftest/#overview","text":"In the context of evaluating machine learning models, the F-test by George W. Snedecor [1] can be regarded as analogous to Cochran's Q test that can be applied to evaluate multiple classifiers (i.e., whether their accuracies estimated on a test set differ) as described by Looney [2][3]. More formally, assume the task to test the null hypothesis that there is no difference between the classification accuracies [1]: p_i: H_0 = p_1 = p_2 = \\cdots = p_L. Let \\{D_1, \\dots , D_L\\} be a set of classifiers who have all been tested on the same dataset. If the L classifiers don't perform differently, then the F statistic is distributed according to an F distribution with (L-1 ) and (L-1)\\times N degrees of freedom, where N is the number of examples in the test set. The calculation of the F statistic consists of several components, which are listed below (adopted from [3]). Sum of squares of the classifiers: SSA = N \\sum_{i=1}^{N} (L_j)^2, where L_j is the number of classifiers out of L that correctly classified object \\mathbf{z}_j \\in \\mathbf{Z}_{N} , where \\mathbf{Z}_{N} = \\{\\mathbf{z}_1, ... \\mathbf{z}_{N}\\} is the test dataset on which the classifers are tested on. The sum of squares for the objects: SSB= \\frac{1}{L} \\sum_{j=1}^N (L_j)^2 - L\\cdot N \\cdot ACC_{avg}^2, where ACC_{avg} is the average of the accuracies of the different models ACC_{avg} = \\sum_{i=1}^L ACC_i . The total sum of squares: SST = L\\cdot N \\cdot ACC_{avg}^2 (1 - ACC_{avg}^2). The sum of squares for the classification--object interaction: SSAB = SST - SSA - SSB. The mean SSA and mean SSAB values: MSA = \\frac{SSA}{L-1}, and MSAB = \\frac{SSAB}{(L-1) (N-1)}. From the MSA and MSAB, we can then calculate the F-value as F = \\frac{MSA}{MSAB}. After computing the F-value, we can then look up the p-value from a F-distribution table for the corresponding degrees of freedom or obtain it computationally from a cumulative F-distribution function. In practice, if we successfully rejected the null hypothesis at a previously chosen significance threshold, we could perform multiple post hoc pair-wise tests -- for example, McNemar tests with a Bonferroni correction -- to determine which pairs have different population proportions.","title":"Overview"},{"location":"user_guide/evaluate/ftest/#references","text":"[1] Snedecor, George W. and Cochran, William G. (1989), Statistical Methods, Eighth Edition, Iowa State University Press. [2] Looney, Stephen W. \"A statistical technique for comparing the accuracies of several classifiers.\" Pattern Recognition Letters 8, no. 1 (1988): 5-9. [3] Kuncheva, Ludmila I. Combining pattern classifiers: methods and algorithms. 
John Wiley & Sons, 2004.","title":"References"},{"location":"user_guide/evaluate/ftest/#example-1-f-test","text":"import numpy as np from mlxtend.evaluate import ftest ## Dataset: # ground truth labels of the test dataset: y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # predictions by 3 classifiers (`y_model_1`, `y_model_2`, and `y_model_3`): y_model_1 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_2 = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_3 = np.array([1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]) Assuming a significance level \\alpha=0.05 , we can conduct Cochran's Q test as follows, to test the null hypothesis there is no difference between the classification accuracies, p_i: H_0 = p_1 = p_2 = \\cdots = p_L : f, p_value = ftest(y_true, y_model_1, y_model_2, y_model_3) print('F: %.3f' % f) print('p-value: %.3f' % p_value) F: 3.873 p-value: 0.022 Since the p-value is smaller than \\alpha , we can reject the null hypothesis and conclude that there is a difference between the classification accuracies. As mentioned in the introduction earlier, we could now perform multiple post hoc pair-wise tests -- for example, McNemar tests with a Bonferroni correction -- to determine which pairs have different population proportions.","title":"Example 1 - F-test"},{"location":"user_guide/evaluate/ftest/#api","text":"ftest(y_target, y_model_predictions)* F-Test test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array. Returns f, p : float or None, float Returns the F-value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/ftest/","title":"API"},{"location":"user_guide/evaluate/lift_score/","text":"Lift Score Scoring function to compute the LIFT metric, the ratio of correctly predicted positive examples and the actual positive examples in the test dataset. from mlxtend.evaluate import lift_score Overview In the context of classification, lift [1] compares model predictions to randomly generated predictions. Lift is often used in marketing research combined with gain and lift charts as a visual aid [2]. For example, assuming a 10% customer response as a baseline, a lift value of 3 would correspond to a 30% customer response when using the predictive model. 
Note that lift has the range \\lbrack 0, \\infty \\rbrack . There are many strategies to compute lift , and below, we will illustrate the computation of the lift score using a classic confusion matrix. For instance, let's assume the following prediction and target labels, where \"1\" is the positive class: \\text{true labels}: [0, 0, 1, 0, 0, 1, 1, 1, 1, 1] \\text{prediction}: [1, 0, 1, 0, 0, 0, 0, 1, 0, 0] Then, our confusion matrix would look as follows: Based on the confusion matrix above, with \"1\" as positive label, we compute lift as follows: \\text{lift} = \\frac{TP/(TP+FP)}{(TP+FN)/(TP+TN+FP+FN)} Plugging in the actual values from the example above, we arrive at the following lift value: \\frac{2/(2+1)}{(2+4)/(2+3+1+4)} = 1.1111111111111112 An alternative way of computing lift is by using the support metric [3]: \\text{lift} = \\frac{\\text{support}(\\text{true labels} \\cap \\text{prediction})}{\\text{support}(\\text{true labels}) \\times \\text{support}(\\text{prediction})}, Support is x / N , where x is the number of occurrences of an observation and N is the total number of samples in the dataset. \\text{true labels} \\cap \\text{prediction} are the true positives, true labels are true positives plus false negatives, and prediction are true positives plus false positives. Plugging the values from our example into the equation above, we arrive at: \\frac{2/10}{(6/10 \\times 3/10)} = 1.1111111111111112 References [1] S. Brin, R. Motwani, J. D. Ullman, and S. Tsur. Dynamic itemset counting and implication rules for market basket data . In Proc. of the ACM SIGMOD Int'l Conf. on Management of Data (ACM SIGMOD '97), pages 265-276, 1997. [2] https://www3.nd.edu/~busiforc/Lift_chart.html [3] https://en.wikipedia.org/wiki/Association_rule_learning#Support Example 1 - Computing Lift This example demonstrates the basic use of the lift_score function using the example from the Overview section. import numpy as np from mlxtend.evaluate import lift_score y_target = np.array([0, 0, 1, 0, 0, 1, 1, 1, 1, 1]) y_predicted = np.array([1, 0, 1, 0, 0, 0, 0, 1, 0, 0]) lift_score(y_target, y_predicted) 1.1111111111111112 Example 2 - Using lift_score in GridSearch The lift_score function can also be used with scikit-learn objects, such as GridSearchCV : from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.svm import SVC from sklearn.metrics import make_scorer # make custom scorer lift_scorer = make_scorer(lift_score) iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, stratify=y, random_state=123) hyperparameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}] clf = GridSearchCV(SVC(), hyperparameters, cv=10, scoring=lift_scorer) clf.fit(X_train, y_train) print(clf.best_score_) print(clf.best_params_) 3.0 {'gamma': 0.001, 'kernel': 'rbf', 'C': 1000} API lift_score(y_target, y_predicted, binary=True, positive_label=1) Lift measures the degree to which the predictions of a classification model are better than randomly-generated predictions. In terms of True Positives (TP), True Negatives (TN), False Positives (FP), and False Negatives (FN), the lift score is computed as: [ TP/(TP+FN) ] / [ (TP+FP) / (TP+TN+FP+FN) ] Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. 
binary : bool (default: True) Maps a multi-class problem onto a binary problem, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns score : float Lift score in the range [0, \\infty ] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/lift_score/","title":"Lift Score"},{"location":"user_guide/evaluate/lift_score/#lift-score","text":"Scoring function to compute the LIFT metric, the ratio of correctly predicted positive examples and the actual positive examples in the test dataset. from mlxtend.evaluate import lift_score","title":"Lift Score"},{"location":"user_guide/evaluate/lift_score/#overview","text":"In the context of classification, lift [1] compares model predictions to randomly generated predictions. Lift is often used in marketing research combined with gain and lift charts as a visual aid [2]. For example, assuming a 10% customer response as a baseline, a lift value of 3 would correspond to a 30% customer response when using the predictive model. Note that lift has the range \\lbrack 0, \\infty \\rbrack . There are many strategies to compute lift , and below, we will illustrate the computation of the lift score using a classic confusion matrix. For instance, let's assume the following prediction and target labels, where \"1\" is the positive class: \\text{true labels}: [0, 0, 1, 0, 0, 1, 1, 1, 1, 1] \\text{prediction}: [1, 0, 1, 0, 0, 0, 0, 1, 0, 0] Then, our confusion matrix would look as follows: Based on the confusion matrix above, with \"1\" as positive label, we compute lift as follows: \\text{lift} = \\frac{TP/(TP+FP)}{(TP+FN)/(TP+TN+FP+FN)} Plugging in the actual values from the example above, we arrive at the following lift value: \\frac{2/(2+1)}{(2+4)/(2+3+1+4)} = 1.1111111111111112 An alternative way of computing lift is by using the support metric [3]: \\text{lift} = \\frac{\\text{support}(\\text{true labels} \\cap \\text{prediction})}{\\text{support}(\\text{true labels}) \\times \\text{support}(\\text{prediction})}, Support is x / N , where x is the number of occurrences of an observation and N is the total number of samples in the dataset. \\text{true labels} \\cap \\text{prediction} are the true positives, true labels are true positives plus false negatives, and prediction are true positives plus false positives. Plugging the values from our example into the equation above, we arrive at: \\frac{2/10}{(6/10 \\times 3/10)} = 1.1111111111111112","title":"Overview"},{"location":"user_guide/evaluate/lift_score/#references","text":"[1] S. Brin, R. Motwani, J. D. Ullman, and S. Tsur. Dynamic itemset counting and implication rules for market basket data . In Proc. of the ACM SIGMOD Int'l Conf. on Management of Data (ACM SIGMOD '97), pages 265-276, 1997. [2] https://www3.nd.edu/~busiforc/Lift_chart.html [3] https://en.wikipedia.org/wiki/Association_rule_learning#Support","title":"References"},{"location":"user_guide/evaluate/lift_score/#example-1-computing-lift","text":"This example demonstrates the basic use of the lift_score function using the example from the Overview section. 
import numpy as np from mlxtend.evaluate import lift_score y_target = np.array([0, 0, 1, 0, 0, 1, 1, 1, 1, 1]) y_predicted = np.array([1, 0, 1, 0, 0, 0, 0, 1, 0, 0]) lift_score(y_target, y_predicted) 1.1111111111111112","title":"Example 1 - Computing Lift"},{"location":"user_guide/evaluate/lift_score/#example-2-using-lift_score-in-gridsearch","text":"The lift_score function can also be used with scikit-learn objects, such as GridSearchCV : from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.svm import SVC from sklearn.metrics import make_scorer # make custom scorer lift_scorer = make_scorer(lift_score) iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, stratify=y, random_state=123) hyperparameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}] clf = GridSearchCV(SVC(), hyperparameters, cv=10, scoring=lift_scorer) clf.fit(X_train, y_train) print(clf.best_score_) print(clf.best_params_) 3.0 {'gamma': 0.001, 'kernel': 'rbf', 'C': 1000}","title":"Example 2 - Using lift_score in GridSearch"},{"location":"user_guide/evaluate/lift_score/#api","text":"lift_score(y_target, y_predicted, binary=True, positive_label=1) Lift measures the degree to which the predictions of a classification model are better than randomly-generated predictions. In terms of True Positives (TP), True Negatives (TN), False Positives (FP), and False Negatives (FN), the lift score is computed as: [ TP/(TP+FN) ] / [ (TP+FP) / (TP+TN+FP+FN) ] Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: True) Maps a multi-class problem onto a binary problem, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns score : float Lift score in the range [0, \\infty ] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/lift_score/","title":"API"},{"location":"user_guide/evaluate/mcnemar/","text":"McNemar's Test McNemar's test for paired nominal data from mlxtend.evaluate import mcnemar Overview McNemar's Test [1] (sometimes also called \"within-subjects chi-squared test\") is a statistical test for paired nominal data. In the context of machine learning (or statistical) models, we can use McNemar's Test to compare the predictive accuracy of two models. McNemar's test is based on a 2x2 contingency table of the two models' predictions. McNemar's Test Statistic In McNemar's Test, we formulate the null hypothesis that the probabilities p(b) and p(c) are the same, or in simplified terms: neither of the two models performs better than the other. Thus, the alternative hypothesis is that the performances of the two models are not equal. The McNemar test statistic (\"chi-squared\") can be computed as follows: \\chi^2 = \\frac{(b - c)^2}{(b + c)}. If the sum of cells b and c is sufficiently large, the \\chi^2 value follows a chi-squared distribution with one degree of freedom. After setting a significance threshold, e.g., \\alpha=0.05 , we can compute the p-value -- assuming that the null hypothesis is true, the p-value is the probability of observing this empirical (or a larger) chi-squared value. 
If the p-value is lower than our chosen significance level, we can reject the null hypothesis that the two models' performances are equal. Continuity Correction Approximately 1 year after Quinn McNemar published the McNemar Test [1], Edwards [2] proposed a continuity-corrected version, which is the more commonly used variant today: \\chi^2 = \\frac{( \\mid b - c \\mid - 1)^2}{(b + c)}. Exact p-values As mentioned earlier, an exact binomial test is recommended for small sample sizes ( b + c < 25 [3]), since the chi-squared value may not be well-approximated by the chi-squared distribution. The exact p-value can be computed as follows: p = 2 \\sum^{n}_{i=b} \\binom{n}{i} 0.5^i (1 - 0.5)^{n-i}, where n = b + c , and the factor 2 is used to compute the two-sided p-value. Example For instance, given that two models have accuracies of 99.7% and 99.6%, a 2x2 contingency table can provide further insights for model selection. In both subfigures A and B, the predictive accuracies of the two models are as follows: model 1 accuracy: 9,960 / 10,000 = 99.6% model 2 accuracy: 9,970 / 10,000 = 99.7% Now, in subfigure A, we can see that model 2 got 11 predictions right that model 1 got wrong. Vice versa, model 1 got 1 prediction right that model 2 got wrong. Thus, based on this 11:1 ratio, we may conclude that model 2 performs substantially better than model 1. However, in subfigure B, the ratio is 25:15, which is less conclusive about which model is the better one to choose. In the following coding examples, we will use these two scenarios, A and B, to illustrate McNemar's test. References [1] McNemar, Quinn, 1947. \" Note on the sampling error of the difference between correlated proportions or percentages \". Psychometrika. 12 (2): 153\u2013157. [2] Edwards AL: Note on the \u201ccorrection for continuity\u201d in testing the significance of the difference between correlated proportions. Psychometrika. 1948, 13 (3): 185-187. 10.1007/BF02289261. [3] https://en.wikipedia.org/wiki/McNemar%27s_test Example 1 - Creating 2x2 Contingency tables The mcnemar function expects a 2x2 contingency table as a NumPy array that is formatted as follows: Such a contingency matrix can be created by using the mcnemar_table function from mlxtend.evaluate . For example: import numpy as np from mlxtend.evaluate import mcnemar_table # The correct target (class) labels y_target = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) # Class labels predicted by model 1 y_model1 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) # Class labels predicted by model 2 y_model2 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) tb = mcnemar_table(y_target=y_target, y_model1=y_model1, y_model2=y_model2) print(tb) [[4 1] [2 3]] Example 2 - McNemar's Test for Scenario B Now, let us continue with the example mentioned in the overview section and assume that we already computed the 2x2 contingency table: import numpy as np tb_b = np.array([[9945, 25], [15, 15]]) To test the null hypothesis that the predictive performances of the two models are equal (using a significance level of \\alpha=0.05 ), we can conduct a corrected McNemar Test for computing the chi-squared and p-value as follows: from mlxtend.evaluate import mcnemar chi2, p = mcnemar(ary=tb_b, corrected=True) print('chi-squared:', chi2) print('p-value:', p) chi-squared: 2.025 p-value: 0.154728923485 Since the p-value is larger than our assumed significance threshold ( \\alpha=0.05 ), we cannot reject our null hypothesis and assume that there is no significant difference between the two predictive models. 
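As a sanity check, we can reproduce this result by hand from the continuity-corrected formula in the overview, together with SciPy's chi-squared survival function (this cross-check is just a sketch assuming SciPy is available; it is not part of the mlxtend API):

from scipy import stats

b, c = 25, 15  # off-diagonal cells of the scenario B contingency table

# Edwards' continuity-corrected test statistic
chi2_stat = (abs(b - c) - 1)**2 / (b + c)

# p-value from a chi-squared distribution with 1 degree of freedom
p_val = stats.chi2.sf(chi2_stat, df=1)

print(chi2_stat)  # 2.025
print(p_val)      # ~0.1547, matching the mcnemar output above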
Example 3 - McNemar's Test for Scenario A In contrast to scenario B (Example 2), the sample size in scenario A is relatively small (b + c = 11 + 1 = 12) and smaller than the recommended 25 [3] for the chi-squared distribution to approximate the computed chi-squared value well. In this case, we need to compute the exact p-value from the binomial distribution: from mlxtend.evaluate import mcnemar import numpy as np tb_a = np.array([[9959, 11], [1, 29]]) chi2, p = mcnemar(ary=tb_a, exact=True) print('chi-squared:', chi2) print('p-value:', p) chi-squared: None p-value: 0.005859375 Assuming that we conducted this test also with a significance level of \alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p \approx 0.006 ) is smaller than \alpha . API mcnemar(ary, corrected=True, exact=False) McNemar test for paired nominal data Parameters ary : array-like, shape=[2, 2] 2 x 2 contingency table (as returned by evaluate.mcnemar_table), where a: ary[0, 0]: # of samples that both models predicted correctly b: ary[0, 1]: # of samples that model 1 got right and model 2 got wrong c: ary[1, 0]: # of samples that model 2 got right and model 1 got wrong d: ary[1, 1]: # of samples that both models predicted incorrectly corrected : bool (default: True) Uses Edwards' continuity correction for chi-squared if True exact : bool (default: False) If True , uses an exact binomial test comparing b to a binomial distribution with n = b + c and p = 0.5. It is highly recommended to use exact=True for sample sizes < 25 since the chi-squared value is not well approximated by the chi-squared distribution! Returns chi2, p : float or None, float Returns the chi-squared value and the p-value; if exact=True (default: False ), chi2 is None Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/","title":"API"},{"location":"user_guide/evaluate/mcnemar/#mcnemars-test","text":"McNemar's test for paired nominal data from mlxtend.evaluate import mcnemar","title":"McNemar's Test"},{"location":"user_guide/evaluate/mcnemar/#overview","text":"McNemar's Test [1] (sometimes also called "within-subjects chi-squared test") is a statistical test for paired nominal data. In the context of machine learning (or statistical) models, we can use McNemar's Test to compare the predictive accuracy of two models. McNemar's test is based on a 2x2 contingency table of the two models' predictions.","title":"Overview"},{"location":"user_guide/evaluate/mcnemar/#mcnemars-test-statistic","text":"In McNemar's Test, we formulate the null hypothesis that the probabilities p(b) and p(c) are the same, or in simplified terms: neither of the two models performs better than the other. Thus, the alternative hypothesis is that the performances of the two models are not equal. The McNemar test statistic ("chi-squared") can be computed as follows: \chi^2 = \frac{(b - c)^2}{(b + c)}, If the sum of cells b and c is sufficiently large, the \chi^2 value follows a chi-squared distribution with one degree of freedom. After setting a significance threshold, e.g., \alpha=0.05 , we can compute the p-value -- assuming that the null hypothesis is true, the p-value is the probability of observing this empirical (or a larger) chi-squared value.
If the p-value is lower than our chosen significance level, we can reject the null hypothesis that the two models' performances are equal.","title":"McNemar's Test Statistic"},{"location":"user_guide/evaluate/mcnemar/#continuity-correction","text":"Approximately 1 year after Quinn McNemar published the McNemar Test [1], Edwards [2] proposed a continuity corrected version, which is the more commonly used variant today: \chi^2 = \frac{( \mid b - c \mid - 1)^2}{(b + c)}.","title":"Continuity Correction"},{"location":"user_guide/evaluate/mcnemar/#exact-p-values","text":"As mentioned earlier, an exact binomial test is recommended for small sample sizes ( b + c < 25 [3]), since the chi-squared value may not be well approximated by the chi-squared distribution. The exact p-value can be computed as follows: p = 2 \sum^{n}_{i=b} \binom{n}{i} 0.5^i (1 - 0.5)^{n-i}, where n = b + c , and the factor 2 is used to compute the two-sided p-value.","title":"Exact p-values"},{"location":"user_guide/evaluate/mcnemar/#example","text":"For instance, given that two models have accuracies of 99.7% and 99.6%, a 2x2 contingency table can provide further insights for model selection. In both subfigures A and B, the predictive accuracies of the two models are as follows: model 1 accuracy: 9,960 / 10,000 = 99.6% model 2 accuracy: 9,970 / 10,000 = 99.7% Now, in subfigure A, we can see that model 2 got 11 predictions right that model 1 got wrong. Vice versa, model 1 got 1 prediction right that model 2 got wrong. Thus, based on this 11:1 ratio, we may conclude that model 2 performs substantially better than model 1. However, in subfigure B, the ratio is 25:15, which is less conclusive about which model is the better one to choose. In the following coding examples, we will use these two scenarios, A and B, to illustrate McNemar's test.","title":"Example"},{"location":"user_guide/evaluate/mcnemar/#references","text":"[1] McNemar, Quinn, 1947. " Note on the sampling error of the difference between correlated proportions or percentages ". Psychometrika. 12 (2): 153\u2013157. [2] Edwards AL: Note on the \u201ccorrection for continuity\u201d in testing the significance of the difference between correlated proportions. Psychometrika. 1948, 13 (3): 185-187. 10.1007/BF02289261. [3] https://en.wikipedia.org/wiki/McNemar%27s_test","title":"References"},{"location":"user_guide/evaluate/mcnemar/#example-1-creating-2x2-contigency-tables","text":"The mcnemar function expects a 2x2 contingency table as a NumPy array that is formatted as follows: Such a contingency matrix can be created by using the mcnemar_table function from mlxtend.evaluate .
For example: import numpy as np from mlxtend.evaluate import mcnemar_table # The correct target (class) labels y_target = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) # Class labels predicted by model 1 y_model1 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) # Class labels predicted by model 2 y_model2 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) tb = mcnemar_table(y_target=y_target, y_model1=y_model1, y_model2=y_model2) print(tb) [[4 1] [2 3]]","title":"Example 1 - Creating 2x2 Contingency tables"},{"location":"user_guide/evaluate/mcnemar/#example-2-mcnemars-test-for-scenario-b","text":"Now, let us continue with the example mentioned in the overview section and assume that we already computed the 2x2 contingency table: import numpy as np tb_b = np.array([[9945, 25], [15, 15]]) To test the null hypothesis that the predictive performances of the two models are equal (using a significance level of \alpha=0.05 ), we can conduct a corrected McNemar's test to compute the chi-squared and p-value as follows: from mlxtend.evaluate import mcnemar chi2, p = mcnemar(ary=tb_b, corrected=True) print('chi-squared:', chi2) print('p-value:', p) chi-squared: 2.025 p-value: 0.154728923485 Since the p-value is larger than our assumed significance threshold ( \alpha=0.05 ), we cannot reject the null hypothesis and conclude that there is no significant difference between the two predictive models.","title":"Example 2 - McNemar's Test for Scenario B"},{"location":"user_guide/evaluate/mcnemar/#example-3-mcnemars-test-for-scenario-a","text":"In contrast to scenario B (Example 2), the sample size in scenario A is relatively small (b + c = 11 + 1 = 12) and smaller than the recommended 25 [3] for the chi-squared distribution to approximate the computed chi-squared value well. In this case, we need to compute the exact p-value from the binomial distribution: from mlxtend.evaluate import mcnemar import numpy as np tb_a = np.array([[9959, 11], [1, 29]]) chi2, p = mcnemar(ary=tb_a, exact=True) print('chi-squared:', chi2) print('p-value:', p) chi-squared: None p-value: 0.005859375 Assuming that we conducted this test also with a significance level of \alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p \approx 0.006 ) is smaller than \alpha .","title":"Example 3 - McNemar's Test for Scenario A"},{"location":"user_guide/evaluate/mcnemar/#api","text":"mcnemar(ary, corrected=True, exact=False) McNemar test for paired nominal data Parameters ary : array-like, shape=[2, 2] 2 x 2 contingency table (as returned by evaluate.mcnemar_table), where a: ary[0, 0]: # of samples that both models predicted correctly b: ary[0, 1]: # of samples that model 1 got right and model 2 got wrong c: ary[1, 0]: # of samples that model 2 got right and model 1 got wrong d: ary[1, 1]: # of samples that both models predicted incorrectly corrected : bool (default: True) Uses Edwards' continuity correction for chi-squared if True exact : bool (default: False) If True , uses an exact binomial test comparing b to a binomial distribution with n = b + c and p = 0.5. It is highly recommended to use exact=True for sample sizes < 25 since the chi-squared value is not well approximated by the chi-squared distribution!
Returns chi2, p : float or None, float Returns the chi-squared value and the p-value; if exact=True (default: False ), chi2 is None Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/","title":"API"},{"location":"user_guide/evaluate/mcnemar_table/","text":"Contingency Table for McNemar's Test Function to compute a 2x2 contingency table for McNemar's Test from mlxtend.evaluate import mcnemar_table Overview Contingency Table for McNemar's Test A 2x2 contingency table, as used in McNemar's Test ( mlxtend.evaluate.mcnemar ), is a useful aid for comparing two different models. In contrast to a typical confusion matrix, this table compares two models to each other rather than showing the false positives, true positives, false negatives, and true negatives of a single model's predictions: For instance, given that two models have accuracies of 99.7% and 99.6%, a 2x2 contingency table can provide further insights for model selection. In both subfigures A and B, the predictive accuracies of the two models are as follows: model 1 accuracy: 9,960 / 10,000 = 99.6% model 2 accuracy: 9,970 / 10,000 = 99.7% Now, in subfigure A, we can see that model 2 got 11 predictions right that model 1 got wrong. Vice versa, model 1 got 1 prediction right that model 2 got wrong. Thus, based on this 11:1 ratio, we may conclude that model 2 performs substantially better than model 1. However, in subfigure B, the ratio is 25:15, which is less conclusive about which model is the better one to choose. References McNemar, Quinn, 1947. " Note on the sampling error of the difference between correlated proportions or percentages ". Psychometrika. 12 (2): 153\u2013157. Edwards AL: Note on the \u201ccorrection for continuity\u201d in testing the significance of the difference between correlated proportions. Psychometrika. 1948, 13 (3): 185-187. 10.1007/BF02289261. https://en.wikipedia.org/wiki/McNemar%27s_test Example 2 - 2x2 Contingency Table import numpy as np from mlxtend.evaluate import mcnemar_table y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod1 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) tb = mcnemar_table(y_target=y_true, y_model1=y_mod1, y_model2=y_mod2) tb array([[4, 1], [2, 3]]) To visualize (and better interpret) the contingency table via matplotlib, we can use the checkerboard_plot function: from mlxtend.plotting import checkerboard_plot import matplotlib.pyplot as plt brd = checkerboard_plot(tb, figsize=(3, 3), fmt='%d', col_labels=['model 2 wrong', 'model 2 right'], row_labels=['model 1 wrong', 'model 1 right']) plt.show() API mcnemar_table(y_target, y_model1, y_model2) Compute a 2x2 contingency table for McNemar's test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. y_model1 : array-like, shape=[n_samples] Predicted class labels from model 1 as 1D NumPy array. y_model2 : array-like, shape=[n_samples] Predicted class labels from model 2 as 1D NumPy array.
Returns tb : array-like, shape=[2, 2] 2x2 contingency table with the following contents: a: tb[0, 0]: # of samples that both models predicted correctly b: tb[0, 1]: # of samples that model 1 got right and model 2 got wrong c: tb[1, 0]: # of samples that model 2 got right and model 1 got wrong d: tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_table/","title":"Contingency Table for McNemar's Test"},{"location":"user_guide/evaluate/mcnemar_table/#contigency-table-for-mcnemars-test","text":"Function to compute a 2x2 contingency table for McNemar's Test from mlxtend.evaluate import mcnemar_table","title":"Contingency Table for McNemar's Test"},{"location":"user_guide/evaluate/mcnemar_table/#overview","text":"","title":"Overview"},{"location":"user_guide/evaluate/mcnemar_table/#contigency-table-for-mcnemars-test_1","text":"A 2x2 contingency table, as used in McNemar's Test ( mlxtend.evaluate.mcnemar ), is a useful aid for comparing two different models. In contrast to a typical confusion matrix, this table compares two models to each other rather than showing the false positives, true positives, false negatives, and true negatives of a single model's predictions: For instance, given that two models have accuracies of 99.7% and 99.6%, a 2x2 contingency table can provide further insights for model selection. In both subfigures A and B, the predictive accuracies of the two models are as follows: model 1 accuracy: 9,960 / 10,000 = 99.6% model 2 accuracy: 9,970 / 10,000 = 99.7% Now, in subfigure A, we can see that model 2 got 11 predictions right that model 1 got wrong. Vice versa, model 1 got 1 prediction right that model 2 got wrong. Thus, based on this 11:1 ratio, we may conclude that model 2 performs substantially better than model 1. However, in subfigure B, the ratio is 25:15, which is less conclusive about which model is the better one to choose.","title":"Contingency Table for McNemar's Test"},{"location":"user_guide/evaluate/mcnemar_table/#references","text":"McNemar, Quinn, 1947. " Note on the sampling error of the difference between correlated proportions or percentages ". Psychometrika. 12 (2): 153\u2013157. Edwards AL: Note on the \u201ccorrection for continuity\u201d in testing the significance of the difference between correlated proportions. Psychometrika. 1948, 13 (3): 185-187. 10.1007/BF02289261. https://en.wikipedia.org/wiki/McNemar%27s_test","title":"References"},{"location":"user_guide/evaluate/mcnemar_table/#example-2-2x2-contigency-table","text":"import numpy as np from mlxtend.evaluate import mcnemar_table y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod1 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) tb = mcnemar_table(y_target=y_true, y_model1=y_mod1, y_model2=y_mod2) tb array([[4, 1], [2, 3]]) To visualize (and better interpret) the contingency table via matplotlib, we can use the checkerboard_plot function: from mlxtend.plotting import checkerboard_plot import matplotlib.pyplot as plt brd = checkerboard_plot(tb, figsize=(3, 3), fmt='%d', col_labels=['model 2 wrong', 'model 2 right'], row_labels=['model 1 wrong', 'model 1 right']) plt.show()","title":"Example 2 - 2x2 Contingency Table"},{"location":"user_guide/evaluate/mcnemar_table/#api","text":"mcnemar_table(y_target, y_model1, y_model2) Compute a 2x2 contingency table for McNemar's test.
Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. y_model1 : array-like, shape=[n_samples] Predicted class labels from model 1 as 1D NumPy array. y_model2 : array-like, shape=[n_samples] Predicted class labels from model 2 as 1D NumPy array. Returns tb : array-like, shape=[2, 2] 2x2 contingency table with the following contents: a: tb[0, 0]: # of samples that both models predicted correctly b: tb[0, 1]: # of samples that model 1 got right and model 2 got wrong c: tb[1, 0]: # of samples that model 2 got right and model 1 got wrong d: tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_table/","title":"API"},{"location":"user_guide/evaluate/mcnemar_tables/","text":"Contingency Tables for McNemar's Test and Cochran's Q Test Function to compute 2x2 contingency tables for McNemar's Test and Cochran's Q Test from mlxtend.evaluate import mcnemar_tables Overview Contingency Tables A 2x2 contingency table, as used in McNemar's Test ( mlxtend.evaluate.mcnemar ), is a useful aid for comparing two different models. In contrast to a typical confusion matrix, this table compares two models to each other rather than showing the false positives, true positives, false negatives, and true negatives of a single model's predictions: For instance, given that two models have accuracies of 99.7% and 99.6%, a 2x2 contingency table can provide further insights for model selection. In both subfigures A and B, the predictive accuracies of the two models are as follows: model 1 accuracy: 9,960 / 10,000 = 99.6% model 2 accuracy: 9,970 / 10,000 = 99.7% Now, in subfigure A, we can see that model 2 got 11 predictions right that model 1 got wrong. Vice versa, model 1 got 1 prediction right that model 2 got wrong. Thus, based on this 11:1 ratio, we may conclude that model 2 performs substantially better than model 1. However, in subfigure B, the ratio is 25:15, which is less conclusive about which model is the better one to choose. References McNemar, Quinn, 1947. " Note on the sampling error of the difference between correlated proportions or percentages ". Psychometrika. 12 (2): 153\u2013157. Edwards AL: Note on the \u201ccorrection for continuity\u201d in testing the significance of the difference between correlated proportions. Psychometrika. 1948, 13 (3): 185-187. 10.1007/BF02289261.
https://en.wikipedia.org/wiki/McNemar%27s_test Example 1 - Single 2x2 Contingency Table import numpy as np from mlxtend.evaluate import mcnemar_tables y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) tb = mcnemar_tables(y_true, y_mod0, y_mod1) tb {'model_0 vs model_1': array([[ 4., 1.], [ 2., 3.]])} To visualize (and better interpret) the contingency table via matplotlib, we can use the checkerboard_plot function: from mlxtend.plotting import checkerboard_plot import matplotlib.pyplot as plt brd = checkerboard_plot(tb['model_0 vs model_1'], figsize=(3, 3), fmt='%d', col_labels=['model 2 wrong', 'model 2 right'], row_labels=['model 1 wrong', 'model 1 right']) plt.show() Example 2 - Multiple 2x2 Contingency Tables If more than two models are provided as input to the mcnemar_tables function, a 2x2 contingency table will be created for each pair of models: import numpy as np from mlxtend.evaluate import mcnemar_tables y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 1, 0]) tb = mcnemar_tables(y_true, y_mod0, y_mod1, y_mod2) for key, value in tb.items(): print(key, '\n', value, '\n') model_0 vs model_1 [[ 4. 1.] [ 2. 3.]] model_0 vs model_2 [[ 4. 2.] [ 2. 2.]] model_1 vs model_2 [[ 5. 1.] [ 0. 4.]] API mcnemar_tables(y_target, *y_model_predictions) Compute multiple 2x2 contingency tables for McNemar's test or Cochran's Q test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. y_model_predictions : array-like, shape=[n_samples] Predicted class labels for a model. Returns tables : dict Dictionary of NumPy arrays with shape=[2, 2]. Each dictionary key names the two models to be compared based on the order the models were passed as *y_model_predictions .
The number of dictionary entries is equal to the number of pairwise combinations between the m models, i.e., "m choose 2." For example, the following target array (containing the true labels) and 3 models y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 1, 1, 1, 0, 1, 0, 0, 0, 0]) would result in the following dictionary: {'model_0 vs model_1': array([[ 4., 1.], [ 2., 3.]]), 'model_0 vs model_2': array([[ 3., 0.], [ 3., 4.]]), 'model_1 vs model_2': array([[ 3., 0.], [ 2., 5.]])} Each array is structured in the following way: tb[0, 0]: # of samples that both models predicted correctly tb[0, 1]: # of samples that model a got right and model b got wrong tb[1, 0]: # of samples that model b got right and model a got wrong tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/","title":"Contingency Tables for McNemar's Test and Cochran's Q Test"},{"location":"user_guide/evaluate/mcnemar_tables/#contigency-tables-for-mcnemars-test-and-cochrans-q-test","text":"Function to compute 2x2 contingency tables for McNemar's Test and Cochran's Q Test from mlxtend.evaluate import mcnemar_tables","title":"Contingency Tables for McNemar's Test and Cochran's Q Test"},{"location":"user_guide/evaluate/mcnemar_tables/#overview","text":"","title":"Overview"},{"location":"user_guide/evaluate/mcnemar_tables/#contigency-tables","text":"A 2x2 contingency table, as used in McNemar's Test ( mlxtend.evaluate.mcnemar ), is a useful aid for comparing two different models. In contrast to a typical confusion matrix, this table compares two models to each other rather than showing the false positives, true positives, false negatives, and true negatives of a single model's predictions: For instance, given that two models have accuracies of 99.7% and 99.6%, a 2x2 contingency table can provide further insights for model selection. In both subfigures A and B, the predictive accuracies of the two models are as follows: model 1 accuracy: 9,960 / 10,000 = 99.6% model 2 accuracy: 9,970 / 10,000 = 99.7% Now, in subfigure A, we can see that model 2 got 11 predictions right that model 1 got wrong. Vice versa, model 1 got 1 prediction right that model 2 got wrong. Thus, based on this 11:1 ratio, we may conclude that model 2 performs substantially better than model 1. However, in subfigure B, the ratio is 25:15, which is less conclusive about which model is the better one to choose.","title":"Contingency Tables"},{"location":"user_guide/evaluate/mcnemar_tables/#references","text":"McNemar, Quinn, 1947. " Note on the sampling error of the difference between correlated proportions or percentages ". Psychometrika. 12 (2): 153\u2013157. Edwards AL: Note on the \u201ccorrection for continuity\u201d in testing the significance of the difference between correlated proportions. Psychometrika. 1948, 13 (3): 185-187. 10.1007/BF02289261.
https://en.wikipedia.org/wiki/McNemar%27s_test","title":"References"},{"location":"user_guide/evaluate/mcnemar_tables/#example-1-single-2x2-contigency-table","text":"import numpy as np from mlxtend.evaluate import mcnemar_tables y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) tb = mcnemar_tables(y_true, y_mod0, y_mod1) tb {'model_0 vs model_1': array([[ 4., 1.], [ 2., 3.]])} To visualize (and better interpret) the contingency table via matplotlib, we can use the checkerboard_plot function: from mlxtend.plotting import checkerboard_plot import matplotlib.pyplot as plt brd = checkerboard_plot(tb['model_0 vs model_1'], figsize=(3, 3), fmt='%d', col_labels=['model 2 wrong', 'model 2 right'], row_labels=['model 1 wrong', 'model 1 right']) plt.show()","title":"Example 1 - Single 2x2 Contingency Table"},{"location":"user_guide/evaluate/mcnemar_tables/#example-2-multiple-2x2-contigency-tables","text":"If more than two models are provided as input to the mcnemar_tables function, a 2x2 contingency table will be created for each pair of models: import numpy as np from mlxtend.evaluate import mcnemar_tables y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 1, 0]) tb = mcnemar_tables(y_true, y_mod0, y_mod1, y_mod2) for key, value in tb.items(): print(key, '\n', value, '\n') model_0 vs model_1 [[ 4. 1.] [ 2. 3.]] model_0 vs model_2 [[ 4. 2.] [ 2. 2.]] model_1 vs model_2 [[ 5. 1.] [ 0. 4.]]","title":"Example 2 - Multiple 2x2 Contingency Tables"},{"location":"user_guide/evaluate/mcnemar_tables/#api","text":"mcnemar_tables(y_target, *y_model_predictions) Compute multiple 2x2 contingency tables for McNemar's test or Cochran's Q test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. y_model_predictions : array-like, shape=[n_samples] Predicted class labels for a model. Returns tables : dict Dictionary of NumPy arrays with shape=[2, 2]. Each dictionary key names the two models to be compared based on the order the models were passed as *y_model_predictions .
The number of dictionary entries is equal to the number of pairwise combinations between the m models, i.e., "m choose 2." For example, the following target array (containing the true labels) and 3 models y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 1, 1, 1, 0, 1, 0, 0, 0, 0]) would result in the following dictionary: {'model_0 vs model_1': array([[ 4., 1.], [ 2., 3.]]), 'model_0 vs model_2': array([[ 3., 0.], [ 3., 4.]]), 'model_1 vs model_2': array([[ 3., 0.], [ 2., 5.]])} Each array is structured in the following way: tb[0, 0]: # of samples that both models predicted correctly tb[0, 1]: # of samples that model a got right and model b got wrong tb[1, 0]: # of samples that model b got right and model a got wrong tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/","title":"API"},{"location":"user_guide/evaluate/paired_ttest_5x2cv/","text":"5x2cv paired t test 5x2cv paired t test procedure to compare the performance of two models from mlxtend.evaluate import paired_ttest_5x2cv Overview The 5x2cv paired t test is a procedure for comparing the performance of two models (classifiers or regressors) that was proposed by Dietterich [1] to address shortcomings in other methods such as the resampled paired t test (see paired_ttest_resampled ) and the k-fold cross-validated paired t test (see paired_ttest_kfold_cv ). To explain how this method works, let's consider two estimators (e.g., classifiers) A and B. Further, we have a labeled dataset D . In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the 5x2cv paired t test, we repeat the splitting (50% training and 50% test data) 5 times. In each of the 5 iterations, we fit A and B to the training split and evaluate their performance ( p_A and p_B ) on the test split. Then, we rotate the training and test sets (the training set becomes the test set and vice versa) and compute the performance again, which results in 2 performance difference measures: p^{(1)} = p^{(1)}_A - p^{(1)}_B and p^{(2)} = p^{(2)}_A - p^{(2)}_B. Then, we estimate the mean and variance of the differences: \overline{p} = \frac{p^{(1)} + p^{(2)}}{2} and s^2 = (p^{(1)} - \overline{p})^2 + (p^{(2)} - \overline{p})^2. The variance of the difference is computed for the 5 iterations and then used to compute the t statistic as follows: t = \frac{p_1^{(1)}}{\sqrt{(1/5) \sum_{i=1}^{5}s_i^2}}, where p_1^{(1)} is the p_1 from the very first iteration. Under the null hypothesis that the models A and B have equal performance, the t statistic approximately follows a t distribution with 5 degrees of freedom. Using the t statistic, the p value can be computed and compared with a previously chosen significance level, e.g., \alpha=0.05 . If the p value is smaller than \alpha , we reject the null hypothesis and accept that there is a significant difference in the two models. References [1] Dietterich TG (1998) Approximate Statistical Tests for Comparing Supervised Classification Learning Algorithms. Neural Comput 10:1895\u20131923.
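To make the formulas above concrete, the following is a minimal sketch of the 5x2cv statistic (an illustration under the stated formulas, not mlxtend's implementation; the function name five_by_two_cv_ttest is hypothetical, and accuracy scoring via the estimators' score method is assumed):

import numpy as np
from scipy import stats
from sklearn.model_selection import train_test_split

def five_by_two_cv_ttest(est_a, est_b, X, y, seed=1):
    rng = np.random.RandomState(seed)
    variances, p_1_1 = [], None
    for i in range(5):
        # one 50/50 split per repetition; the two halves swap roles below
        X_1, X_2, y_1, y_2 = train_test_split(
            X, y, test_size=0.5, random_state=rng.randint(10000))
        diffs = []
        for X_tr, y_tr, X_te, y_te in [(X_1, y_1, X_2, y_2),
                                       (X_2, y_2, X_1, y_1)]:
            score_a = est_a.fit(X_tr, y_tr).score(X_te, y_te)
            score_b = est_b.fit(X_tr, y_tr).score(X_te, y_te)
            diffs.append(score_a - score_b)  # p^{(1)}, then p^{(2)}
        if p_1_1 is None:
            p_1_1 = diffs[0]  # p_1^{(1)} from the very first iteration
        p_bar = sum(diffs) / 2.0
        variances.append((diffs[0] - p_bar) ** 2 + (diffs[1] - p_bar) ** 2)
    t = p_1_1 / np.sqrt(np.mean(variances))  # (1/5) * sum of s_i^2 under the root
    p_value = 2.0 * stats.t.sf(np.abs(t), df=5)  # two-sided, 5 degrees of freedom
    return t, p_value

Note how only the very first fold difference enters the numerator, while all 10 fold differences contribute to the variance estimate in the denominator, exactly as in the formula above.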
Example 1 - 5x2cv paired t test Assume we want to compare two classification algorithms, logistic regression and a decision tree algorithm: from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from mlxtend.data import iris_data from sklearn.model_selection import train_test_split X, y = iris_data() clf1 = LogisticRegression(random_state=1) clf2 = DecisionTreeClassifier(random_state=1) X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=0.25, random_state=123) score1 = clf1.fit(X_train, y_train).score(X_test, y_test) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Logistic regression accuracy: %.2f%%' % (score1*100)) print('Decision tree accuracy: %.2f%%' % (score2*100)) Logistic regression accuracy: 97.37% Decision tree accuracy: 94.74% Note that these accuracy values are not used in the paired t test procedure, as new test/train splits are generated during the resampling procedure; the values above merely serve as an intuition. Now, let's assume a significance threshold of \alpha=0.05 for rejecting the null hypothesis that both algorithms perform equally well on the dataset and conduct the 5x2cv t test: from mlxtend.evaluate import paired_ttest_5x2cv t, p = paired_ttest_5x2cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) t statistic: -1.539 p value: 0.184 Since p > \alpha , we cannot reject the null hypothesis and may conclude that the performance of the two algorithms is not significantly different. While it is generally not recommended to apply statistical tests multiple times without correction for multiple hypothesis testing, let us take a look at an example where the decision tree algorithm is limited to producing a very simple decision boundary that would result in a relatively bad performance: clf2 = DecisionTreeClassifier(random_state=1, max_depth=1) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Decision tree accuracy: %.2f%%' % (score2*100)) t, p = paired_ttest_5x2cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) Decision tree accuracy: 63.16% t statistic: 5.386 p value: 0.003 Assuming that we conducted this test also with a significance level of \alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p \approx 0.003 ) is smaller than \alpha . API paired_ttest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv paired t test proposed by Dietterich (1998) to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors.
If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_5x2cv/","title":"API"},{"location":"user_guide/evaluate/paired_ttest_5x2cv/#5x2cv-paired-t-test","text":"5x2cv paired t test procedure to compare the performance of two models from mlxtend.evaluate import paired_ttest_5x2cv","title":"5x2cv paired t test"},{"location":"user_guide/evaluate/paired_ttest_5x2cv/#overview","text":"The 5x2cv paired t test is a procedure for comparing the performance of two models (classifiers or regressors) that was proposed by Dietterich [1] to address shortcomings in other methods such as the resampled paired t test (see paired_ttest_resampled ) and the k-fold cross-validated paired t test (see paired_ttest_kfold_cv ). To explain how this method works, let's consider two estimators (e.g., classifiers) A and B. Further, we have a labeled dataset D . In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the 5x2cv paired t test, we repeat the splitting (50% training and 50% test data) 5 times. In each of the 5 iterations, we fit A and B to the training split and evaluate their performance ( p_A and p_B ) on the test split. Then, we rotate the training and test sets (the training set becomes the test set and vice versa) and compute the performance again, which results in 2 performance difference measures: p^{(1)} = p^{(1)}_A - p^{(1)}_B and p^{(2)} = p^{(2)}_A - p^{(2)}_B. Then, we estimate the mean and variance of the differences: \overline{p} = \frac{p^{(1)} + p^{(2)}}{2} and s^2 = (p^{(1)} - \overline{p})^2 + (p^{(2)} - \overline{p})^2. The variance of the difference is computed for the 5 iterations and then used to compute the t statistic as follows: t = \frac{p_1^{(1)}}{\sqrt{(1/5) \sum_{i=1}^{5}s_i^2}}, where p_1^{(1)} is the p_1 from the very first iteration. Under the null hypothesis that the models A and B have equal performance, the t statistic approximately follows a t distribution with 5 degrees of freedom. Using the t statistic, the p value can be computed and compared with a previously chosen significance level, e.g., \alpha=0.05 . If the p value is smaller than \alpha , we reject the null hypothesis and accept that there is a significant difference in the two models.","title":"Overview"},{"location":"user_guide/evaluate/paired_ttest_5x2cv/#references","text":"[1] Dietterich TG (1998) Approximate Statistical Tests for Comparing Supervised Classification Learning Algorithms.
Neural Comput 10:1895\u20131923.","title":"References"},{"location":"user_guide/evaluate/paired_ttest_5x2cv/#example-1-5x2cv-paired-t-test","text":"Assume we want to compare two classification algorithms, logistic regression and a decision tree algorithm: from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from mlxtend.data import iris_data from sklearn.model_selection import train_test_split X, y = iris_data() clf1 = LogisticRegression(random_state=1) clf2 = DecisionTreeClassifier(random_state=1) X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=0.25, random_state=123) score1 = clf1.fit(X_train, y_train).score(X_test, y_test) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Logistic regression accuracy: %.2f%%' % (score1*100)) print('Decision tree accuracy: %.2f%%' % (score2*100)) Logistic regression accuracy: 97.37% Decision tree accuracy: 94.74% Note that these accuracy values are not used in the paired t test procedure, as new test/train splits are generated during the resampling procedure; the values above merely serve as an intuition. Now, let's assume a significance threshold of \alpha=0.05 for rejecting the null hypothesis that both algorithms perform equally well on the dataset and conduct the 5x2cv t test: from mlxtend.evaluate import paired_ttest_5x2cv t, p = paired_ttest_5x2cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) t statistic: -1.539 p value: 0.184 Since p > \alpha , we cannot reject the null hypothesis and may conclude that the performance of the two algorithms is not significantly different. While it is generally not recommended to apply statistical tests multiple times without correction for multiple hypothesis testing, let us take a look at an example where the decision tree algorithm is limited to producing a very simple decision boundary that would result in a relatively bad performance: clf2 = DecisionTreeClassifier(random_state=1, max_depth=1) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Decision tree accuracy: %.2f%%' % (score2*100)) t, p = paired_ttest_5x2cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) Decision tree accuracy: 63.16% t statistic: 5.386 p value: 0.003 Assuming that we conducted this test also with a significance level of \alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p \approx 0.003 ) is smaller than \alpha .","title":"Example 1 - 5x2cv paired t test"},{"location":"user_guide/evaluate/paired_ttest_5x2cv/#api","text":"paired_ttest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv paired t test proposed by Dietterich (1998) to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors.
If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_5x2cv/","title":"API"},{"location":"user_guide/evaluate/paired_ttest_kfold_cv/","text":"K-fold cross-validated paired t test K-fold paired t test procedure to compare the performance of two models from mlxtend.evaluate import paired_ttest_kfold_cv Overview The k-fold cross-validated paired t-test procedure is a common method for comparing the performance of two models (classifiers or regressors) and addresses some of the drawbacks of the resampled t-test procedure ; however, this method still has the problem that the training sets overlap, and it is not recommended for use in practice [1]; techniques such as the paired_ttest_5x2cv should be used instead. To explain how this method works, let's consider two estimators (e.g., classifiers) A and B. Further, we have a labeled dataset D . In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the k-fold cross-validated paired t-test procedure, we split the dataset into k parts of equal size, and each of these parts is then used for testing while the remaining k-1 parts (joined together) are used for training a classifier or regressor (i.e., the standard k-fold cross-validation procedure). In each k-fold cross-validation iteration, we then compute the difference in performance between A and B so that we obtain k difference measures. Now, by making the assumption that these k differences were independently drawn and follow an approximately normal distribution, we can compute the following t statistic with k-1 degrees of freedom according to Student's t test, under the null hypothesis that the models A and B have equal performance: t = \frac{\overline{p} \sqrt{k}}{\sqrt{\sum_{i=1}^{k}(p^{(i)} - \overline{p})^2 / (k-1)}}.
The problem with this method, and the reason why it is not recommended to be used in practice, is that it violates an assumption of Student's t test [1]: the difference between the model performances ( p^{(i)} = p^{(i)}_A - p^{(i)}_B ) are not normal distributed because p^{(i)}_A and p^{(i)}_B are not independent the p^{(i)} 's themselves are not independent because training sets overlap References [1] Dietterich TG (1998) Approximate Statistical Tests for Comparing Supervised Classification Learning Algorithms. Neural Comput 10:1895\u20131923. Example 1 - K-fold cross-validated paired t test Assume we want to compare two classification algorithms, logistic regression and a decision tree algorithm: from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from mlxtend.data import iris_data from sklearn.model_selection import train_test_split X, y = iris_data() clf1 = LogisticRegression(random_state=1) clf2 = DecisionTreeClassifier(random_state=1) X_train, X_test, y_train, y_test = \\ train_test_split(X, y, test_size=0.25, random_state=123) score1 = clf1.fit(X_train, y_train).score(X_test, y_test) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Logistic regression accuracy: %.2f%%' % (score1*100)) print('Decision tree accuracy: %.2f%%' % (score2*100)) Logistic regression accuracy: 97.37% Decision tree accuracy: 94.74% Note that these accuracy values are not used in the paired t-test procedure as new test/train splits are generated during the resampling procedure, the values above are just serving the purpose of intuition. Now, let's assume a significance threshold of \\alpha=0.05 for rejecting the null hypothesis that both algorithms perform equally well on the dataset and conduct the k-fold cross-validated t-test: from mlxtend.evaluate import paired_ttest_kfold_cv t, p = paired_ttest_kfold_cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) t statistic: -1.861 p value: 0.096 Since p > t , we cannot reject the null hypothesis and may conclude that the performance of the two algorithms is not significantly different. While it is generally not recommended to apply statistical tests multiple times without correction for multiple hypothesis testing, let us take a look at an example where the decision tree algorithm is limited to producing a very simple decision boundary that would result in a relatively bad performance: clf2 = DecisionTreeClassifier(random_state=1, max_depth=1) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Decision tree accuracy: %.2f%%' % (score2*100)) t, p = paired_ttest_kfold_cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) Decision tree accuracy: 63.16% t statistic: 13.491 p value: 0.000 Assuming that we conducted this test also with a significance level of \\alpha=0.05 , we can reject the null-hypothesis that both models perform equally well on this dataset, since the p-value ( p < 0.001 ) is smaller than \\alpha . API paired_ttest_kfold_cv(estimator1, estimator2, X, y, cv=10, scoring=None, shuffle=False, random_seed=None) Implements the k-fold paired t test procedure to compare the performance of two models. 
Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. cv : int (default: 10) Number of splits and iterations for the cross-validation procedure scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. shuffle : bool (default: False) Whether to shuffle the dataset for generating the k-fold splits. random_seed : int or None (default: None) Random seed for shuffling the dataset for generating the k-fold splits. Ignored if shuffle=False. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_kfold_cv/","title":"K-fold cross-validated paired *t* test"},{"location":"user_guide/evaluate/paired_ttest_kfold_cv/#k-fold-cross-validated-paired-t-test","text":"K-fold paired t test procedure to compare the performance of two models from mlxtend.evaluate import paired_ttest_kfold_cv","title":"K-fold cross-validated paired t test"},{"location":"user_guide/evaluate/paired_ttest_kfold_cv/#overview","text":"The k-fold cross-validated paired t-test procedure is a common method for comparing the performance of two models (classifiers or regressors) and addresses some of the drawbacks of the resampled t-test procedure ; however, this method still has the problem that the training sets overlap, and it is not recommended for use in practice [1]; techniques such as the paired_ttest_5x2cv should be used instead. To explain how this method works, let's consider two estimators (e.g., classifiers) A and B. Further, we have a labeled dataset D . In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the k-fold cross-validated paired t-test procedure, we split the dataset into k parts of equal size, and each of these parts is then used for testing while the remaining k-1 parts (joined together) are used for training a classifier or regressor (i.e., the standard k-fold cross-validation procedure). In each k-fold cross-validation iteration, we then compute the difference in performance between A and B so that we obtain k difference measures. Now, by making the assumption that these k differences were independently drawn and follow an approximately normal distribution, we can compute the following t statistic with k-1 degrees of freedom according to Student's t test, under the null hypothesis that the models A and B have equal performance: t = \frac{\overline{p} \sqrt{k}}{\sqrt{\sum_{i=1}^{k}(p^{(i)} - \overline{p})^2 / (k-1)}}.
Here, p^{(i)} denotes the difference between the model performances in the i th iteration, p^{(i)} = p^{(i)}_A - p^{(i)}_B , and \overline{p} represents the average difference between the classifier performances, \overline{p} = \frac{1}{k} \sum^k_{i=1} p^{(i)} . Once we have computed the t statistic, we can compute the p value and compare it to our chosen significance level, e.g., \alpha=0.05 . If the p value is smaller than \alpha , we reject the null hypothesis and accept that there is a significant difference in the two models. The problem with this method, and the reason why it is not recommended to be used in practice, is that it violates an assumption of Student's t test [1]: the differences between the model performances ( p^{(i)} = p^{(i)}_A - p^{(i)}_B ) are not normally distributed because p^{(i)}_A and p^{(i)}_B are not independent the p^{(i)} 's themselves are not independent because the training sets overlap","title":"Overview"},{"location":"user_guide/evaluate/paired_ttest_kfold_cv/#references","text":"[1] Dietterich TG (1998) Approximate Statistical Tests for Comparing Supervised Classification Learning Algorithms. Neural Comput 10:1895\u20131923.","title":"References"},{"location":"user_guide/evaluate/paired_ttest_kfold_cv/#example-1-k-fold-cross-validated-paired-t-test","text":"Assume we want to compare two classification algorithms, logistic regression and a decision tree algorithm: from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from mlxtend.data import iris_data from sklearn.model_selection import train_test_split X, y = iris_data() clf1 = LogisticRegression(random_state=1) clf2 = DecisionTreeClassifier(random_state=1) X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=0.25, random_state=123) score1 = clf1.fit(X_train, y_train).score(X_test, y_test) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Logistic regression accuracy: %.2f%%' % (score1*100)) print('Decision tree accuracy: %.2f%%' % (score2*100)) Logistic regression accuracy: 97.37% Decision tree accuracy: 94.74% Note that these accuracy values are not used in the paired t-test procedure, as new test/train splits are generated during the resampling procedure; the values above merely serve as an intuition. Now, let's assume a significance threshold of \alpha=0.05 for rejecting the null hypothesis that both algorithms perform equally well on the dataset and conduct the k-fold cross-validated t-test: from mlxtend.evaluate import paired_ttest_kfold_cv t, p = paired_ttest_kfold_cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) t statistic: -1.861 p value: 0.096 Since p > \alpha , we cannot reject the null hypothesis and may conclude that the performance of the two algorithms is not significantly different.
While it is generally not recommended to apply statistical tests multiple times without correction for multiple hypothesis testing, let us take a look at an example where the decision tree algorithm is limited to producing a very simple decision boundary that would result in a relatively bad performance: clf2 = DecisionTreeClassifier(random_state=1, max_depth=1) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Decision tree accuracy: %.2f%%' % (score2*100)) t, p = paired_ttest_kfold_cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) Decision tree accuracy: 63.16% t statistic: 13.491 p value: 0.000 Assuming that we conducted this test also with a significance level of \alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p < 0.001 ) is smaller than \alpha .","title":"Example 1 - K-fold cross-validated paired t test"},{"location":"user_guide/evaluate/paired_ttest_kfold_cv/#api","text":"paired_ttest_kfold_cv(estimator1, estimator2, X, y, cv=10, scoring=None, shuffle=False, random_seed=None) Implements the k-fold paired t test procedure to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. cv : int (default: 10) Number of splits and iterations for the cross-validation procedure scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. shuffle : bool (default: False) Whether to shuffle the dataset for generating the k-fold splits. random_seed : int or None (default: None) Random seed for shuffling the dataset for generating the k-fold splits. Ignored if shuffle=False. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_kfold_cv/","title":"API"},{"location":"user_guide/evaluate/paired_ttest_resampled/","text":"Resampled paired t test Resampled paired t test procedure to compare the performance of two models from mlxtend.evaluate import paired_ttest_resampled Overview The resampled paired t test procedure (also called k-hold-out paired t test) is a popular method for comparing the performance of two models (classifiers or regressors); however, this method has many drawbacks and is not recommended for use in practice [1]; techniques such as the paired_ttest_5x2cv should be used instead. To explain how this method works, let's consider two estimators (e.g., classifiers) A and B.
Further, we have a labeled dataset D . In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the resampled paired t test procedure, we repeat this splitting procedure (with typically 2/3 training data and 1/3 test data) k times (usually 30). In each iteration, we train A and B on the training set and evaluate them on the test set. Then, we compute the difference in performance between A and B in each iteration so that we obtain k difference measures. Now, by making the assumption that these k differences were independently drawn and follow an approximately normal distribution, we can compute the following t statistic with k-1 degrees of freedom according to Student's t test, under the null hypothesis that the models A and B have equal performance: t = \frac{\overline{p} \sqrt{k}}{\sqrt{\sum_{i=1}^{k}(p^{(i)} - \overline{p})^2 / (k-1)}}. Here, p^{(i)} denotes the difference between the model performances in the i th iteration, p^{(i)} = p^{(i)}_A - p^{(i)}_B , and \overline{p} represents the average difference between the classifier performances, \overline{p} = \frac{1}{k} \sum^k_{i=1} p^{(i)} . Once we have computed the t statistic, we can compute the p value and compare it to our chosen significance level, e.g., \alpha=0.05 . If the p value is smaller than \alpha , we reject the null hypothesis and accept that there is a significant difference in the two models. To summarize the procedure: i := 0 while i < k: split dataset into training and test subsets fit models A and B to the training set compute the performances of A and B on the test set record the performance difference between A and B i := i + 1 compute t-statistic compute p value from t-statistic with k-1 degrees of freedom compare p value to chosen significance threshold The problem with this method, and the reason why it is not recommended to be used in practice, is that it violates the assumptions of Student's t test [1]: the differences between the model performances ( p^{(i)} = p^{(i)}_A - p^{(i)}_B ) are not normally distributed because p^{(i)}_A and p^{(i)}_B are not independent the p^{(i)} 's themselves are not independent because of the overlapping test sets; also, test and training sets overlap as well References [1] Dietterich TG (1998) Approximate Statistical Tests for Comparing Supervised Classification Learning Algorithms. Neural Comput 10:1895\u20131923. Example 1 - Resampled paired t test Assume we want to compare two classification algorithms, logistic regression and a decision tree algorithm: from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from mlxtend.data import iris_data from sklearn.model_selection import train_test_split X, y = iris_data() clf1 = LogisticRegression(random_state=1) clf2 = DecisionTreeClassifier(random_state=1) X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=0.25, random_state=123) score1 = clf1.fit(X_train, y_train).score(X_test, y_test) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Logistic regression accuracy: %.2f%%' % (score1*100)) print('Decision tree accuracy: %.2f%%' % (score2*100)) Logistic regression accuracy: 97.37% Decision tree accuracy: 94.74% Note that these accuracy values are not used in the paired t test procedure, as new test/train splits are generated during the resampling procedure; the values above merely serve as an intuition.
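For intuition, the pseudocode from the overview translates almost line by line into Python. The following is a minimal sketch (an illustration only, not mlxtend's implementation; the name resampled_paired_ttest is hypothetical, and accuracy scoring via the estimators' score method is assumed):

import numpy as np
from scipy import stats
from sklearn.model_selection import train_test_split

def resampled_paired_ttest(est_a, est_b, X, y, k=30, test_size=0.3, seed=1):
    rng = np.random.RandomState(seed)
    diffs = []
    for _ in range(k):
        # fresh train/test split in every iteration
        X_tr, X_te, y_tr, y_te = train_test_split(
            X, y, test_size=test_size, random_state=rng.randint(10000))
        score_a = est_a.fit(X_tr, y_tr).score(X_te, y_te)
        score_b = est_b.fit(X_tr, y_tr).score(X_te, y_te)
        diffs.append(score_a - score_b)  # p^{(i)}
    diffs = np.array(diffs)
    p_bar = diffs.mean()
    # t = p_bar * sqrt(k) / sqrt(sum((p^{(i)} - p_bar)^2) / (k - 1))
    t = p_bar * np.sqrt(k) / np.sqrt(((diffs - p_bar) ** 2).sum() / (k - 1))
    p_value = 2.0 * stats.t.sf(np.abs(t), df=k - 1)  # two-sided, k-1 d.o.f.
    return t, p_value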
Now, let's assume a significance threshold of \\alpha=0.05 for rejecting the null hypothesis that both algorithms perform equally well on the dataset and conduct the paired sample t test: from mlxtend.evaluate import paired_ttest_resampled t, p = paired_ttest_resampled(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) t statistic: -1.809 p value: 0.081 Since p > \\alpha , we cannot reject the null hypothesis and may conclude that the performance of the two algorithms is not significantly different. While it is generally not recommended to apply statistical tests multiple times without correction for multiple hypothesis testing, let us take a look at an example where the decision tree algorithm is limited to producing a very simple decision boundary that would result in a relatively poor performance: clf2 = DecisionTreeClassifier(random_state=1, max_depth=1) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Decision tree accuracy: %.2f%%' % (score2*100)) t, p = paired_ttest_resampled(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) Decision tree accuracy: 63.16% t statistic: 39.214 p value: 0.000 Assuming that we also conducted this test with a significance level of \\alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p < 0.001 ) is smaller than \\alpha . API paired_ttest_resampled(estimator1, estimator2, X, y, num_rounds=30, test_size=0.3, scoring=None, random_seed=None) Implements the resampled paired t test procedure to compare the performance of two models (also called k-hold-out paired t test). Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. num_rounds : int (default: 30) Number of resampling iterations (i.e., train/test splits) test_size : float or int (default: 0.3) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to use as a test set. If int, represents the absolute number of test examples. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models.
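Since scoring defaults to 'r2' for sklearn regressors, the same test can also be applied to regression models. A hedged usage sketch (the choice of regressors and dataset here is illustrative and not part of the original documentation):

from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from mlxtend.data import boston_housing_data
from mlxtend.evaluate import paired_ttest_resampled

X, y = boston_housing_data()
reg1 = LinearRegression()
reg2 = DecisionTreeRegressor(random_state=1)

# scoring=None falls back to 'r2' because both estimators are regressors
t, p = paired_ttest_resampled(estimator1=reg1,
                              estimator2=reg2,
                              X=X, y=y,
                              random_seed=1)
print('t statistic: %.3f' % t)
print('p value: %.3f' % p)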
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_resampled/","title":"Resampled paired *t* test"},{"location":"user_guide/evaluate/paired_ttest_resampled/#resampled-paired-t-test","text":"Resampled paired t test procedure to compare the performance of two models from mlxtend.evaluate import paired_ttest_resampled","title":"Resampled paired t test"},{"location":"user_guide/evaluate/paired_ttest_resampled/#overview","text":"The resampled paired t test procedure (also called k-hold-out paired t test) is a popular method for comparing the performance of two models (classifiers or regressors); however, this method has many drawbacks and is not recommended for use in practice [1], and techniques such as the paired_ttest_5x2cv should be used instead. To explain how this method works, let's consider two estimators (e.g., classifiers) A and B. Further, we have a labeled dataset D . In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the resampled paired t test procedure, we repeat this splitting procedure (with typically 2/3 training data and 1/3 test data) k times (usually 30). In each iteration, we train A and B on the training set and evaluate them on the test set. Then, we compute the difference in performance between A and B in each iteration so that we obtain k difference measures. Now, under the assumption that these k differences were independently drawn and follow an approximately normal distribution, we can compute the following t statistic with k-1 degrees of freedom according to Student's t test, under the null hypothesis that the models A and B have equal performance: t = \\frac{\\overline{p} \\sqrt{k}}{\\sqrt{\\sum_{i=1}^{k}(p^{(i)} - \\overline{p})^2 / (k-1)}}. Here, p^{(i)} denotes the difference between the model performances in the i th iteration, p^{(i)} = p^{(i)}_A - p^{(i)}_B , and \\overline{p} represents the average difference between the classifier performances, \\overline{p} = \\frac{1}{k} \\sum^k_{i=1} p^{(i)} . Once we have computed the t statistic, we can compute the p value and compare it to our chosen significance level, e.g., \\alpha=0.05 . If the p value is smaller than \\alpha , we reject the null hypothesis and accept that there is a significant difference between the two models. To summarize the procedure: i := 0 while i < k: split dataset into training and test subsets fit models A and B to the training set compute the performances of A and B on the test set record the performance difference between A and B i := i + 1 compute t-statistic compute p value from t-statistic with k-1 degrees of freedom compare p value to chosen significance threshold The problem with this method, and the reason why it is not recommended for use in practice, is that it violates the assumptions of Student's t test [1]: the differences between the model performances ( p^{(i)} = p^{(i)}_A - p^{(i)}_B ) are not normally distributed because p^{(i)}_A and p^{(i)}_B are not independent the p^{(i)} 's themselves are not independent because of the overlapping test sets; also, test and training sets overlap as well","title":"Overview"},{"location":"user_guide/evaluate/paired_ttest_resampled/#references","text":"[1] Dietterich TG (1998) Approximate Statistical Tests for Comparing Supervised Classification Learning Algorithms.
Neural Comput 10:1895\u20131923.","title":"References"},{"location":"user_guide/evaluate/paired_ttest_resampled/#example-1-resampled-paired-t-test","text":"Assume we want to compare two classification algorithms, logistic regression and a decision tree algorithm: from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from mlxtend.data import iris_data from sklearn.model_selection import train_test_split X, y = iris_data() clf1 = LogisticRegression(random_state=1) clf2 = DecisionTreeClassifier(random_state=1) X_train, X_test, y_train, y_test = \\ train_test_split(X, y, test_size=0.25, random_state=123) score1 = clf1.fit(X_train, y_train).score(X_test, y_test) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Logistic regression accuracy: %.2f%%' % (score1*100)) print('Decision tree accuracy: %.2f%%' % (score2*100)) Logistic regression accuracy: 97.37% Decision tree accuracy: 94.74% Note that these accuracy values are not used in the paired t test procedure, as new train/test splits are generated during the resampling procedure; the values above merely serve to provide intuition. Now, let's assume a significance threshold of \\alpha=0.05 for rejecting the null hypothesis that both algorithms perform equally well on the dataset and conduct the paired sample t test: from mlxtend.evaluate import paired_ttest_resampled t, p = paired_ttest_resampled(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) t statistic: -1.809 p value: 0.081 Since p > \\alpha , we cannot reject the null hypothesis and may conclude that the performance of the two algorithms is not significantly different. While it is generally not recommended to apply statistical tests multiple times without correction for multiple hypothesis testing, let us take a look at an example where the decision tree algorithm is limited to producing a very simple decision boundary that would result in a relatively poor performance: clf2 = DecisionTreeClassifier(random_state=1, max_depth=1) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Decision tree accuracy: %.2f%%' % (score2*100)) t, p = paired_ttest_resampled(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) Decision tree accuracy: 63.16% t statistic: 39.214 p value: 0.000 Assuming that we also conducted this test with a significance level of \\alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p < 0.001 ) is smaller than \\alpha .","title":"Example 1 - Resampled paired t test"},{"location":"user_guide/evaluate/paired_ttest_resampled/#api","text":"paired_ttest_resampled(estimator1, estimator2, X, y, num_rounds=30, test_size=0.3, scoring=None, random_seed=None) Implements the resampled paired t test procedure to compare the performance of two models (also called k-hold-out paired t test). Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values.
num_rounds : int (default: 30) Number of resampling iterations (i.e., train/test splits) test_size : float or int (default: 0.3) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to use as a test set. If int, represents the absolute number of test examples. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_resampled/","title":"API"},{"location":"user_guide/evaluate/permutation_test/","text":"Permutation Test An implementation of a permutation test for hypothesis testing -- testing the null hypothesis that two different groups come from the same distribution. from mlxtend.evaluate import permutation_test Overview Permutation tests (also called exact tests, randomization tests, or re-randomization tests) are nonparametric test procedures to test the null hypothesis that two different groups come from the same distribution. A permutation test can be used for significance or hypothesis testing (including A/B testing) without requiring us to make any assumptions about the sampling distribution (e.g., it doesn't require the samples to be normally distributed). Under the null hypothesis (treatment = control), any permutations are equally likely. (Note that there are (n+m)! permutations, where n is the number of records in the treatment sample, and m is the number of records in the control sample). For a two-sided test, we define the alternative hypothesis that the two samples are different (e.g., treatment != control). Compute the difference (here: mean) of sample x and sample y Combine all measurements into a single dataset Draw a permuted dataset from all possible permutations of the dataset from step 2 Divide the permuted dataset into two datasets x' and y' of size n and m , respectively Compute the difference (here: mean) of sample x' and sample y' and record this difference Repeat steps 3-5 until all permutations are evaluated Return the p-value as the number of recorded differences that are more extreme than the original difference from step 1, divided by the total number of permutations Here, the p-value is defined as the probability, given the null hypothesis (no difference between the samples) is true, that we obtain results that are at least as extreme as the results we observed (i.e., the sample difference from step 1). More formally, we can express the computation of the p-value as follows ([2]): p(t > t_0) = \\frac{1}{(n+m)!} \\sum^{(n+m)!}_{j=1} I(t_j > t_0), where t_0 is the observed value of the test statistic (step 1 in the list above), and t is the statistic computed from the resamples (step 5), t(x'_1, x'_2, ..., x'_n, y'_1, y'_2, ..., y'_m) = |\\bar{x'} - \\bar{y'}| , and I is the indicator function.
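As a concrete illustration of steps 1-7 above, here is a minimal sketch of an exact two-sided permutation test for the difference in means; it is for intuition only, since mlxtend's permutation_test implements the same idea (the function name and the toy data are made up for the sketch):

import numpy as np
from itertools import combinations

def exact_permutation_test(x, y):
    # step 1: observed absolute difference in means
    observed = np.abs(np.mean(x) - np.mean(y))
    combined = np.concatenate([x, y])  # step 2
    n = len(x)
    # steps 3-6: for the mean statistic it suffices to enumerate every
    # way of assigning n of the pooled observations to group x'
    # (the (n+m)! raw permutations collapse into these distinct splits)
    more_extreme, total = 0, 0
    for idx in combinations(range(len(combined)), n):
        mask = np.zeros(len(combined), dtype=bool)
        mask[list(idx)] = True
        diff = np.abs(combined[mask].mean() - combined[~mask].mean())
        more_extreme += int(diff >= observed)
        total += 1
    # step 7: fraction of permutations at least as extreme as the original
    return more_extreme / total

x = np.array([8.4, 9.1, 7.7])
y = np.array([10.2, 11.0, 10.5])
print(exact_permutation_test(x, y))  # 0.1 for this toy data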
Given a significance level that we specify prior to carrying out the permutation test (e.g., alpha=0.05), we fail to reject the null hypothesis if the p-value is greater than alpha. Note that if the number of permutations is large, evaluating all permutations may not be computationally feasible. Thus, a common approximation is to perform k rounds of permutations (where k is typically a value between 1000 and 2000). References [1] Efron, Bradley and Tibshirani, R. J., An introduction to the bootstrap, Chapman & Hall/CRC Monographs on Statistics & Applied Probability, 1994. [2] Unpingco, Jos\u00e9. Python for probability, statistics, and machine learning. Springer, 2016. [3] Pitman, E. J. G., Significance tests which may be applied to samples from any population, Royal Statistical Society Supplement, 1937, 4: 119-30 and 225-32 Example 1 -- Two-sided permutation test Perform a two-sided permutation test to test the null hypothesis that the two groups, \"treatment\" and \"control\", come from the same distribution. We specify alpha=0.01 as our significance level. treatment = [ 28.44, 29.32, 31.22, 29.58, 30.34, 28.76, 29.21, 30.4 , 31.12, 31.78, 27.58, 31.57, 30.73, 30.43, 30.31, 30.32, 29.18, 29.52, 29.22, 30.56] control = [ 33.51, 30.63, 32.38, 32.52, 29.41, 30.93, 49.78, 28.96, 35.77, 31.42, 30.76, 30.6 , 23.64, 30.54, 47.78, 31.98, 34.52, 32.42, 31.32, 40.72] Since evaluating all possible permutations may take a while, we will use the approximation method (see the introduction for details): from mlxtend.evaluate import permutation_test p_value = permutation_test(treatment, control, method='approximate', num_rounds=10000, seed=0) print(p_value) 0.0066 Since p-value < alpha, we can reject the null hypothesis that the two samples come from the same distribution. Example 2 -- Calculating the p-value for correlation analysis (Pearson's R) Note: this is a one-sided hypothesis test, as we conduct the permutation test by asking \"how many times do we obtain a correlation coefficient that is greater than the observed value?\" import numpy as np from mlxtend.evaluate import permutation_test x = np.array([1, 2, 3, 4, 5, 6]) y = np.array([2, 4, 1, 5, 6, 7]) print('Observed pearson R: %.2f' % np.corrcoef(x, y)[1][0]) p_value = permutation_test(x, y, method='exact', func=lambda x, y: np.corrcoef(x, y)[1][0], seed=0) print('P value: %.2f' % p_value) Observed pearson R: 0.81 P value: 0.09 API permutation_test(x, y, func='x_mean != y_mean', method='exact', num_rounds=1000, seed=None) Nonparametric permutation test Parameters x : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the first sample (e.g., the treatment group). y : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the second sample (e.g., the control group). func : custom function or str (default: 'x_mean != y_mean') Function to compute the statistic for the permutation test. - If 'x_mean != y_mean', uses func=lambda x, y: np.abs(np.mean(x) - np.mean(y)) for a two-sided test. - If 'x_mean > y_mean', uses func=lambda x, y: np.mean(x) - np.mean(y) for a one-sided test. - If 'x_mean < y_mean', uses func=lambda x, y: np.mean(y) - np.mean(x) for a one-sided test. method : 'approximate' or 'exact' (default: 'exact') If 'exact' (default), all possible permutations are considered.
If 'approximate', the number of drawn samples is given by num_rounds . Note that 'exact' is typically not feasible unless the dataset size is relatively small. num_rounds : int (default: 1000) The number of permutation samples if method='approximate' . seed : int or None (default: None) The random seed for generating permutation samples if method='approximate' . Returns p-value under the null hypothesis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/permutation_test/","title":"Permutation Test"},{"location":"user_guide/evaluate/permutation_test/#permutation-test","text":"An implementation of a permutation test for hypothesis testing -- testing the null hypothesis that two different groups come from the same distribution. from mlxtend.evaluate import permutation_test","title":"Permutation Test"},{"location":"user_guide/evaluate/permutation_test/#overview","text":"Permutation tests (also called exact tests, randomization tests, or re-randomization tests) are nonparametric test procedures to test the null hypothesis that two different groups come from the same distribution. A permutation test can be used for significance or hypothesis testing (including A/B testing) without requiring us to make any assumptions about the sampling distribution (e.g., it doesn't require the samples to be normally distributed). Under the null hypothesis (treatment = control), any permutations are equally likely. (Note that there are (n+m)! permutations, where n is the number of records in the treatment sample, and m is the number of records in the control sample). For a two-sided test, we define the alternative hypothesis that the two samples are different (e.g., treatment != control). Compute the difference (here: mean) of sample x and sample y Combine all measurements into a single dataset Draw a permuted dataset from all possible permutations of the dataset from step 2 Divide the permuted dataset into two datasets x' and y' of size n and m , respectively Compute the difference (here: mean) of sample x' and sample y' and record this difference Repeat steps 3-5 until all permutations are evaluated Return the p-value as the number of recorded differences that are more extreme than the original difference from step 1, divided by the total number of permutations Here, the p-value is defined as the probability, given the null hypothesis (no difference between the samples) is true, that we obtain results that are at least as extreme as the results we observed (i.e., the sample difference from step 1). More formally, we can express the computation of the p-value as follows ([2]): p(t > t_0) = \\frac{1}{(n+m)!} \\sum^{(n+m)!}_{j=1} I(t_j > t_0), where t_0 is the observed value of the test statistic (step 1 in the list above), and t is the statistic computed from the resamples (step 5), t(x'_1, x'_2, ..., x'_n, y'_1, y'_2, ..., y'_m) = |\\bar{x'} - \\bar{y'}| , and I is the indicator function. Given a significance level that we specify prior to carrying out the permutation test (e.g., alpha=0.05), we fail to reject the null hypothesis if the p-value is greater than alpha. Note that if the number of permutations is large, evaluating all permutations may not be computationally feasible. Thus, a common approximation is to perform k rounds of permutations (where k is typically a value between 1000 and 2000).","title":"Overview"},{"location":"user_guide/evaluate/permutation_test/#references","text":"[1] Efron, Bradley and Tibshirani, R.
J., An introduction to the bootstrap, Chapman & Hall/CRC Monographs on Statistics & Applied Probability, 1994. [2] Unpingco, Jos\u00e9. Python for probability, statistics, and machine learning. Springer, 2016. [3] Pitman, E. J. G., Significance tests which may be applied to samples from any population, Royal Statistical Society Supplement, 1937, 4: 119-30 and 225-32","title":"References"},{"location":"user_guide/evaluate/permutation_test/#example-1-two-sided-permutation-test","text":"Perform a two-sided permutation test to test the null hypothesis that the two groups, \"treatment\" and \"control\", come from the same distribution. We specify alpha=0.01 as our significance level. treatment = [ 28.44, 29.32, 31.22, 29.58, 30.34, 28.76, 29.21, 30.4 , 31.12, 31.78, 27.58, 31.57, 30.73, 30.43, 30.31, 30.32, 29.18, 29.52, 29.22, 30.56] control = [ 33.51, 30.63, 32.38, 32.52, 29.41, 30.93, 49.78, 28.96, 35.77, 31.42, 30.76, 30.6 , 23.64, 30.54, 47.78, 31.98, 34.52, 32.42, 31.32, 40.72] Since evaluating all possible permutations may take a while, we will use the approximation method (see the introduction for details): from mlxtend.evaluate import permutation_test p_value = permutation_test(treatment, control, method='approximate', num_rounds=10000, seed=0) print(p_value) 0.0066 Since p-value < alpha, we can reject the null hypothesis that the two samples come from the same distribution.","title":"Example 1 -- Two-sided permutation test"},{"location":"user_guide/evaluate/permutation_test/#example-2-calculating-the-p-value-for-correlation-analysis-pearsons-r","text":"Note: this is a one-sided hypothesis test, as we conduct the permutation test by asking \"how many times do we obtain a correlation coefficient that is greater than the observed value?\" import numpy as np from mlxtend.evaluate import permutation_test x = np.array([1, 2, 3, 4, 5, 6]) y = np.array([2, 4, 1, 5, 6, 7]) print('Observed pearson R: %.2f' % np.corrcoef(x, y)[1][0]) p_value = permutation_test(x, y, method='exact', func=lambda x, y: np.corrcoef(x, y)[1][0], seed=0) print('P value: %.2f' % p_value) Observed pearson R: 0.81 P value: 0.09","title":"Example 2 -- Calculating the p-value for correlation analysis (Pearson's R)"},{"location":"user_guide/evaluate/permutation_test/#api","text":"permutation_test(x, y, func='x_mean != y_mean', method='exact', num_rounds=1000, seed=None) Nonparametric permutation test Parameters x : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the first sample (e.g., the treatment group). y : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the second sample (e.g., the control group). func : custom function or str (default: 'x_mean != y_mean') Function to compute the statistic for the permutation test. - If 'x_mean != y_mean', uses func=lambda x, y: np.abs(np.mean(x) - np.mean(y)) for a two-sided test. - If 'x_mean > y_mean', uses func=lambda x, y: np.mean(x) - np.mean(y) for a one-sided test. - If 'x_mean < y_mean', uses func=lambda x, y: np.mean(y) - np.mean(x) for a one-sided test. method : 'approximate' or 'exact' (default: 'exact') If 'exact' (default), all possible permutations are considered. If 'approximate', the number of drawn samples is given by num_rounds . Note that 'exact' is typically not feasible unless the dataset size is relatively small. num_rounds : int (default: 1000) The number of permutation samples if method='approximate' . seed : int or None (default: None) The random seed for generating permutation samples if method='approximate' .
Returns p-value under the null hypothesis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/permutation_test/","title":"API"},{"location":"user_guide/evaluate/proportion_difference/","text":"Proportion Difference Test Test of the difference of proportions for classifier performance comparison. from mlxtend.evaluate import proportion_difference Overview There are several different statistical hypothesis testing frameworks that are being used in practice to compare the performance of classification models, including common methods such as the difference of two proportions (here, the proportions are the estimated generalization accuracies from a test set), for which we can construct 95% confidence intervals based on the concept of the Normal Approximation to the Binomial that was covered in Part I. Performing a z-score test for two population proportions is inarguably the most straightforward way to compare two models (but certainly not the best!): In a nutshell, if the 95% confidence intervals of the accuracies of two models do not overlap, we can reject the null hypothesis that the performance of both classifiers is equal at a confidence level of \\alpha=0.05 (or 5% probability). Violations of assumptions aside (for instance that the test set samples are not independent), as Thomas Dietterich noted based on empirical results in a simulated study [1], this test tends to have a high false positive rate (here: incorrectly detecting a difference when there is none), which is among the reasons why it is not recommended in practice. Nonetheless, for the sake of completeness, and since it is a commonly used method in practice, the general procedure is outlined below (which also generally applies to the different hypothesis tests presented later): formulate the hypothesis to be tested (for instance, the null hypothesis stating that the proportions are the same; consequently, the alternative hypothesis that the proportions are different, if we use a two-tailed test); decide upon a significance threshold (for instance, if the probability of observing a difference more extreme than the one observed is more than 5%, then we plan to reject the null hypothesis); analyze the data, compute the test statistic (here: z-score), and compare its associated p-value (probability) to the previously determined significance threshold; based on the p-value and significance threshold, either accept or reject the null hypothesis at the given confidence level and interpret the results. The z-score is computed as the observed difference divided by the square root of their combined variances z = \\frac{ACC_1 - ACC_2}{\\sqrt{\\sigma_{1}^2 + \\sigma_{2}^2}}, where ACC_1 is the accuracy of one model and ACC_2 is the accuracy of a second model estimated from the test set. Recall that we computed the variance of the estimated accuracy as \\sigma^2 = \\frac{ACC(1-ACC)}{n} in Part I and then computed the confidence interval (Normal Approximation Interval) as ACC \\pm z \\times \\sigma, where z=1.96 for a 95% confidence interval. Comparing the confidence intervals of two accuracy estimates and checking whether they overlap is then analogous to computing the z value for the difference in proportions and comparing the probability (p-value) to the chosen significance threshold.
So, to compute the z-score directly for the difference of two proportions, ACC_1 and ACC_2 , we pool these proportions (assuming that ACC_1 and ACC_2 are the performances of two models estimated on two independent test sets of size n_1 and n_2 , respectively), ACC_{1, 2} = \\frac{ACC_1 \\times n_1 + ACC_2 \\times n_2}{n_1 + n_2}, and compute the standard deviation as \\sigma_{1,2} = \\sqrt{\\frac{ACC_{1, 2} (1 - ACC_{1, 2})}{n_1 + n_2}}, such that we can compute the z-score, z = \\frac{ACC_1 - ACC_2}{\\sigma_{1,2}}. Since we use the same test set (violating the independence assumption), we have n_1 = n_2 = n , so we can simplify the z-score computation to z = \\frac{ACC_1 - ACC_2}{\\sqrt{2\\sigma^2}} = \\frac{ACC_1 - ACC_2}{\\sqrt{2 \\cdot ACC_{1,2}(1-ACC_{1,2})/n}}. where ACC_{1, 2} is simply (ACC_1 + ACC_2)/2 . In the second step, based on the computed z value (this assumes that the test errors are independent, which is usually violated in practice as we use the same test set) we can reject the null hypothesis that a pair of models has equal performance (here, measured in \"classification accuracy\") at an \\alpha=0.05 level if z is greater than 1.96. Or, if we want to put in the extra work, we can compute the area under the standard normal cumulative distribution at the z-score threshold. If we find that this p-value is smaller than a significance level we set prior to conducting the test, then we can reject the null hypothesis at that given significance level. The problem with this test, though, is that we use the same test set to compute the accuracy of the two classifiers; thus, it might be better to use a paired test such as a paired sample t-test, but a more robust alternative is the McNemar test. References [1] Dietterich, Thomas G. \"Approximate statistical tests for comparing supervised classification learning algorithms.\" Neural computation 10, no. 7 (1998): 1895-1923.
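The z-score computation from the first formula above fits in a few lines; a minimal sketch, assuming scipy.stats.norm is available for the one-sided p-value (the helper name is made up, and the numbers are the ones from Example 1 below):

import numpy as np
from scipy import stats

def z_test_proportions(acc_1, acc_2, n):
    # variance of each accuracy estimate: sigma^2 = ACC(1 - ACC)/n
    var_1 = acc_1 * (1 - acc_1) / n
    var_2 = acc_2 * (1 - acc_2) / n
    # z = (ACC_1 - ACC_2) / sqrt(sigma_1^2 + sigma_2^2)
    z = (acc_1 - acc_2) / np.sqrt(var_1 + var_2)
    # p-value: area under the standard normal curve to the left of z
    return z, stats.norm.cdf(z)

z, p = z_test_proportions(0.84, 0.92, n=100)
print('z: %.3f, p-value: %.3f' % (z, p))  # z: -1.754, p-value: 0.040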
Example 1 - Difference of Proportions As an example for applying this test, consider the following 2 model predictions: import numpy as np ## Dataset: # ground truth labels of the test dataset: y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # predictions by 2 classifiers (`y_model_1` and `y_model_2`): y_model_1 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_2 = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) Assume the test accuracies are as follows: acc_1 = np.sum(y_true == y_model_1) / y_true.shape[0] acc_2 = np.sum(y_true == y_model_2) / y_true.shape[0] print('Accuracy Model 1:', acc_1) print('Accuracy Model 2:', acc_2) Accuracy Model 1: 0.84 Accuracy Model 2: 0.92 Now, setting a significance threshold of \\alpha=0.05 and conducting the test from mlxtend.evaluate import proportion_difference z, p_value = proportion_difference(acc_1, acc_2, n_1=y_true.shape[0]) print('z: %.3f' % z) print('p-value: %.3f' % p_value) z: -1.754 p-value: 0.040 we find that there is a statistically significant difference between the model performances. It should be highlighted, though, that due to the typical independence violation of using the same test set, as well as this test's high false positive rate, it is not recommended in practice. API proportion_difference(proportion_1, proportion_2, n_1, n_2=None) Computes the test statistic and p-value for a difference of proportions test. Parameters proportion_1 : float The first proportion proportion_2 : float The second proportion n_1 : int The sample size of the first test sample n_2 : int or None (default=None) The sample size of the second test sample. If None , n_1 = n_2 . Returns z, p : float or None, float Returns the z-score and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/proportion_difference/","title":"Proportion Difference Test"},{"location":"user_guide/evaluate/proportion_difference/#proportion-difference-test","text":"Test of the difference of proportions for classifier performance comparison. from mlxtend.evaluate import proportion_difference","title":"Proportion Difference Test"},{"location":"user_guide/evaluate/proportion_difference/#overview","text":"There are several different statistical hypothesis testing frameworks that are being used in practice to compare the performance of classification models, including common methods such as the difference of two proportions (here, the proportions are the estimated generalization accuracies from a test set), for which we can construct 95% confidence intervals based on the concept of the Normal Approximation to the Binomial that was covered in Part I.
Performing a z-score test for two population proportions is inarguably the most straightforward way to compare two models (but certainly not the best!): In a nutshell, if the 95% confidence intervals of the accuracies of two models do not overlap, we can reject the null hypothesis that the performance of both classifiers is equal at a confidence level of \\alpha=0.05 (or 5% probability). Violations of assumptions aside (for instance that the test set samples are not independent), as Thomas Dietterich noted based on empirical results in a simulated study [1], this test tends to have a high false positive rate (here: incorrectly detecting a difference when there is none), which is among the reasons why it is not recommended in practice. Nonetheless, for the sake of completeness, and since it is a commonly used method in practice, the general procedure is outlined below (which also generally applies to the different hypothesis tests presented later): formulate the hypothesis to be tested (for instance, the null hypothesis stating that the proportions are the same; consequently, the alternative hypothesis that the proportions are different, if we use a two-tailed test); decide upon a significance threshold (for instance, if the probability of observing a difference more extreme than the one observed is more than 5%, then we plan to reject the null hypothesis); analyze the data, compute the test statistic (here: z-score), and compare its associated p-value (probability) to the previously determined significance threshold; based on the p-value and significance threshold, either accept or reject the null hypothesis at the given confidence level and interpret the results. The z-score is computed as the observed difference divided by the square root of their combined variances z = \\frac{ACC_1 - ACC_2}{\\sqrt{\\sigma_{1}^2 + \\sigma_{2}^2}}, where ACC_1 is the accuracy of one model and ACC_2 is the accuracy of a second model estimated from the test set. Recall that we computed the variance of the estimated accuracy as \\sigma^2 = \\frac{ACC(1-ACC)}{n} in Part I and then computed the confidence interval (Normal Approximation Interval) as ACC \\pm z \\times \\sigma, where z=1.96 for a 95% confidence interval. Comparing the confidence intervals of two accuracy estimates and checking whether they overlap is then analogous to computing the z value for the difference in proportions and comparing the probability (p-value) to the chosen significance threshold. So, to compute the z-score directly for the difference of two proportions, ACC_1 and ACC_2 , we pool these proportions (assuming that ACC_1 and ACC_2 are the performances of two models estimated on two independent test sets of size n_1 and n_2 , respectively), ACC_{1, 2} = \\frac{ACC_1 \\times n_1 + ACC_2 \\times n_2}{n_1 + n_2}, and compute the standard deviation as \\sigma_{1,2} = \\sqrt{\\frac{ACC_{1, 2} (1 - ACC_{1, 2})}{n_1 + n_2}}, such that we can compute the z-score, z = \\frac{ACC_1 - ACC_2}{\\sigma_{1,2}}. Since we use the same test set (violating the independence assumption), we have n_1 = n_2 = n , so we can simplify the z-score computation to z = \\frac{ACC_1 - ACC_2}{\\sqrt{2\\sigma^2}} = \\frac{ACC_1 - ACC_2}{\\sqrt{2 \\cdot ACC_{1,2}(1-ACC_{1,2})/n}}. where ACC_{1, 2} is simply (ACC_1 + ACC_2)/2 .
In the second step, based on the computed z value (this assumes that the test errors are independent, which is usually violated in practice as we use the same test set) we can reject the null hypothesis that a pair of models has equal performance (here, measured in \"classification accuracy\") at an \\alpha=0.05 level if z is greater than 1.96. Or, if we want to put in the extra work, we can compute the area under the standard normal cumulative distribution at the z-score threshold. If we find that this p-value is smaller than a significance level we set prior to conducting the test, then we can reject the null hypothesis at that given significance level. The problem with this test, though, is that we use the same test set to compute the accuracy of the two classifiers; thus, it might be better to use a paired test such as a paired sample t-test, but a more robust alternative is the McNemar test.","title":"Overview"},{"location":"user_guide/evaluate/proportion_difference/#references","text":"[1] Dietterich, Thomas G. \"Approximate statistical tests for comparing supervised classification learning algorithms.\" Neural computation 10, no. 7 (1998): 1895-1923.","title":"References"},{"location":"user_guide/evaluate/proportion_difference/#example-1-difference-of-proportions","text":"As an example for applying this test, consider the following 2 model predictions: import numpy as np ## Dataset: # ground truth labels of the test dataset: y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # predictions by 2 classifiers (`y_model_1` and `y_model_2`): y_model_1 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_2 = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) Assume the test accuracies are as follows: acc_1 = np.sum(y_true == y_model_1) / y_true.shape[0] acc_2 = np.sum(y_true == y_model_2) / y_true.shape[0] print('Accuracy Model 1:', acc_1) print('Accuracy Model 2:', acc_2) Accuracy Model 1: 0.84 Accuracy Model 2: 0.92 Now, setting a significance threshold of \\alpha=0.05 and conducting the test from mlxtend.evaluate import proportion_difference z, p_value = proportion_difference(acc_1, acc_2, n_1=y_true.shape[0]) print('z: %.3f' % z) print('p-value: %.3f' % p_value) z: -1.754 p-value: 0.040 we find that there is a statistically significant difference between the model performances.
It should be highlighted, though, that due to the typical independence violation of using the same test set, as well as this test's high false positive rate, it is not recommended in practice.","title":"Example 1 - Difference of Proportions"},{"location":"user_guide/evaluate/proportion_difference/#api","text":"proportion_difference(proportion_1, proportion_2, n_1, n_2=None) Computes the test statistic and p-value for a difference of proportions test. Parameters proportion_1 : float The first proportion proportion_2 : float The second proportion n_1 : int The sample size of the first test sample n_2 : int or None (default=None) The sample size of the second test sample. If None , n_1 = n_2 . Returns z, p : float or None, float Returns the z-score and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/proportion_difference/","title":"API"},{"location":"user_guide/evaluate/scoring/","text":"Scoring A function for computing various performance metrics. from mlxtend.evaluate import scoring Overview Confusion Matrix The confusion matrix (or error matrix ) is one way to summarize the performance of a classifier for binary classification tasks. This square matrix consists of columns and rows that list the number of instances as absolute or relative \"actual class\" vs. \"predicted class\" ratios. Let P be the label of class 1 and N be the label of a second class or the label of all classes that are not class 1 in a multi-class setting. Error and Accuracy Both the prediction error (ERR) and accuracy (ACC) provide general information about how many samples are misclassified. The error can be understood as the sum of all false predictions divided by the total number of predictions, and the accuracy as the sum of correct predictions divided by the total number of predictions. ERR = \\frac{FP + FN}{FP+ FN + TP + TN} = 1-ACC ACC = \\frac{TP + TN}{FP+ FN + TP + TN} = 1-ERR True and False Positive Rates The True Positive Rate (TPR) and False Positive Rate (FPR) are performance metrics that are especially useful for imbalanced class problems. In spam classification , for example, we are of course primarily interested in the detection and filtering out of spam . However, it is also important to decrease the number of messages that were incorrectly classified as spam ( False Positives ): A situation where a person misses an important message is considered \"worse\" than a situation where a person ends up with a few spam messages in their e-mail inbox. In contrast to the FPR , the True Positive Rate provides useful information about the fraction of positive (or relevant ) samples that were correctly identified out of the total pool of Positives . FPR = \\frac{FP}{N} = \\frac{FP}{FP + TN} TPR = \\frac{TP}{P} = \\frac{TP}{FN + TP} Precision, Recall, and the F1-Score Precision (PRE) and Recall (REC) are metrics that are more commonly used in Information Technology and are related to the False and True Positive Rates . In fact, Recall is synonymous with the True Positive Rate and is also sometimes called Sensitivity . The F _1 -Score can be understood as a combination of both Precision and Recall . PRE = \\frac{TP}{TP + FP} REC = TPR = \\frac{TP}{P} = \\frac{TP}{FN + TP} F_1 = 2 \\cdot \\frac{PRE \\cdot REC}{PRE + REC}
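These three metrics are available through the scoring function documented later on this page; a brief hedged illustration (the label vectors here are made up, and the expected values follow from the formulas above):

from mlxtend.evaluate import scoring

y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
y_pred = [1, 0, 1, 0, 0, 1, 1, 1]

# TP=4, FP=1, FN=1 for positive_label=1,
# so PRE = REC = F1 = 0.8 by the formulas above
for metric in ('precision', 'recall', 'f1'):
    res = scoring(y_target=y_targ, y_predicted=y_pred,
                  metric=metric, positive_label=1)
    print('%s: %.3f' % (metric, res))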
PRE = \\frac{TP}{TP + FP} REC = TPR = \\frac{TP}{P} = \\frac{TP}{FN + TP} F_1 = 2 \\cdot \\frac{PRE \\cdot REC}{PRE + REC} Sensitivity and Specificity Sensitivity (SEN) is synonymous to Recall and the True Positive Rate whereas Specificity (SPC) is synonymous to the True Negative Rate -- Sensitivity measures the recovery rate of the Positives and complimentary, the Specificity measures the recovery rate of the Negatives . SEN = TPR = REC = \\frac{TP}{P} = \\frac{TP}{FN + TP} SPC = TNR =\\frac{TN}{N} = \\frac{TN}{FP + TN} Matthews Correlation Coefficient Matthews correlation coefficient (MCC) was first formulated by Brian W. Matthews [3] in 1975 to assess the performance of protein secondary structure predictions. The MCC can be understood as a specific case of a linear correlation coefficient ( Pearson's R ) for a binary classification setting and is considered as especially useful in unbalanced class settings. The previous metrics take values in the range between 0 (worst) and 1 (best), whereas the MCC is bounded between the range 1 (perfect correlation between ground truth and predicted outcome) and -1 (inverse or negative correlation) -- a value of 0 denotes a random prediction. MCC = \\frac{ TP \\times TN - FP \\times FN } {\\sqrt{ (TP + FP) ( TP + FN ) ( TN + FP ) ( TN + FN ) } } Average Per-Class Accuracy The \"overall\" accuracy is defined as the number of correct predictions ( true positives TP and true negatives TN) over all samples n : ACC = \\frac{TP + TN}{n} in a binary class setting: In a multi-class setting, we can generalize the computation of the accuracy as the fraction of all true predictions (the diagonal) over all samples n. ACC = \\frac{T}{n} Considering a multi-class problem with 3 classes (C0, C1, C2) let's assume our model made the following predictions: We compute the accuracy as: ACC = \\frac{3 + 50 + 18}{90} \\approx 0.79 Now, in order to compute the average per-class accuracy , we compute the binary accuracy for each class label separately; i.e., if class 1 is the positive class, class 0 and 2 are both considered the negative class. APC\\;ACC = \\frac{83/90 + 71/90 + 78/90}{3} \\approx 0.86 References [1] S. Raschka. An overview of general performance metrics of binary classifier systems . Computing Research Repository (CoRR), abs/1410.5330, 2014. [2] Cyril Goutte and Eric Gaussier. A probabilistic interpretation of precision, recall and f-score, with implication for evaluation . In Advances in Information Retrieval, pages 345\u2013359. Springer, 2005. [3] Brian W Matthews. Comparison of the predicted and observed secondary structure of T4 phage lysozyme . Biochimica et Biophysica Acta (BBA)- Protein Structure, 405(2):442\u2013451, 1975. Example 1 - Classification Error from mlxtend.evaluate import scoring y_targ = [1, 1, 1, 0, 0, 2, 0, 3] y_pred = [1, 0, 1, 0, 0, 2, 1, 3] res = scoring(y_target=y_targ, y_predicted=y_pred, metric='error') print('Error: %s%%' % (res * 100)) Error: 25.0% API scoring(y_target, y_predicted, metric='error', positive_label=1, unique_labels='auto') Compute a scoring metric for supervised learning. Parameters y_target : array-like, shape=[n_values] True class labels or target values. y_predicted : array-like, shape=[n_values] Predicted class labels or target values. 
metric : str (default: 'error') Performance metric: 'accuracy': (TP + TN)/(FP + FN + TP + TN) = 1-ERR 'per-class accuracy': Average per-class accuracy 'per-class error': Average per-class error 'error': (FP + FN)/(FP + FN + TP + TN) = 1-ACC 'false_positive_rate': FP/N = FP/(FP + TN) 'true_positive_rate': TP/P = TP/(FN + TP) 'true_negative_rate': TN/N = TN/(FP + TN) 'precision': TP/(TP + FP) 'recall': equal to 'true_positive_rate' 'sensitivity': equal to 'true_positive_rate' or 'recall' 'specificity': equal to 'true_negative_rate' 'f1': 2 * (PRE * REC)/(PRE + REC) 'matthews_corr_coef': (TP TN - FP FN) / (sqrt{(TP + FP)( TP + FN )( TN + FP )( TN + FN )}) Where: [TP: True positives, TN: True negatives, FP: False positives, FN: False negatives] positive_label : int (default: 1) Label of the positive class for binary classification metrics. unique_labels : str or array-like (default: 'auto') If 'auto', deduces the unique class labels from y_target Returns score : float Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/scoring/","title":"Scoring"},{"location":"user_guide/evaluate/scoring/#scoring","text":"A function for computing various performance metrics. from mlxtend.evaluate import scoring","title":"Scoring"},{"location":"user_guide/evaluate/scoring/#overview","text":"","title":"Overview"},{"location":"user_guide/evaluate/scoring/#confusion-matrix","text":"The confusion matrix (or error matrix ) is one way to summarize the performance of a classifier for binary classification tasks. This square matrix consists of columns and rows that list the number of instances as absolute or relative \"actual class\" vs. \"predicted class\" ratios. Let P be the label of class 1 and N be the label of a second class or the label of all classes that are not class 1 in a multi-class setting.","title":"Confusion Matrix"},{"location":"user_guide/evaluate/scoring/#error-and-accuracy","text":"Both the prediction error (ERR) and accuracy (ACC) provide general information about how many samples are misclassified. The error can be understood as the sum of all false predictions divided by the total number of predictions, and the accuracy as the sum of correct predictions divided by the total number of predictions. ERR = \\frac{FP + FN}{FP+ FN + TP + TN} = 1-ACC ACC = \\frac{TP + TN}{FP+ FN + TP + TN} = 1-ERR","title":"Error and Accuracy"},{"location":"user_guide/evaluate/scoring/#true-and-false-positive-rates","text":"The True Positive Rate (TPR) and False Positive Rate (FPR) are performance metrics that are especially useful for imbalanced class problems. In spam classification , for example, we are of course primarily interested in the detection and filtering out of spam . However, it is also important to decrease the number of messages that were incorrectly classified as spam ( False Positives ): A situation where a person misses an important message is considered \"worse\" than a situation where a person ends up with a few spam messages in their e-mail inbox. In contrast to the FPR , the True Positive Rate provides useful information about the fraction of positive (or relevant ) samples that were correctly identified out of the total pool of Positives .
FPR = \\frac{FP}{N} = \\frac{FP}{FP + TN} TPR = \\frac{TP}{P} = \\frac{TP}{FN + TP}","title":"True and False Positive Rates"},{"location":"user_guide/evaluate/scoring/#precision-recall-and-the-f1-score","text":"Precision (PRE) and Recall (REC) are metrics that are more commonly used in Information Technology and related to the False and True Prositive Rates . In fact, Recall is synonymous to the True Positive Rate and also sometimes called Sensitivity . The F _1 -Score can be understood as a combination of both Precision and Recall . PRE = \\frac{TP}{TP + FP} REC = TPR = \\frac{TP}{P} = \\frac{TP}{FN + TP} F_1 = 2 \\cdot \\frac{PRE \\cdot REC}{PRE + REC}","title":"Precision, Recall, and the F1-Score"},{"location":"user_guide/evaluate/scoring/#sensitivity-and-specificity","text":"Sensitivity (SEN) is synonymous to Recall and the True Positive Rate whereas Specificity (SPC) is synonymous to the True Negative Rate -- Sensitivity measures the recovery rate of the Positives and complimentary, the Specificity measures the recovery rate of the Negatives . SEN = TPR = REC = \\frac{TP}{P} = \\frac{TP}{FN + TP} SPC = TNR =\\frac{TN}{N} = \\frac{TN}{FP + TN}","title":"Sensitivity and Specificity"},{"location":"user_guide/evaluate/scoring/#matthews-correlation-coefficient","text":"Matthews correlation coefficient (MCC) was first formulated by Brian W. Matthews [3] in 1975 to assess the performance of protein secondary structure predictions. The MCC can be understood as a specific case of a linear correlation coefficient ( Pearson's R ) for a binary classification setting and is considered as especially useful in unbalanced class settings. The previous metrics take values in the range between 0 (worst) and 1 (best), whereas the MCC is bounded between the range 1 (perfect correlation between ground truth and predicted outcome) and -1 (inverse or negative correlation) -- a value of 0 denotes a random prediction. MCC = \\frac{ TP \\times TN - FP \\times FN } {\\sqrt{ (TP + FP) ( TP + FN ) ( TN + FP ) ( TN + FN ) } }","title":"Matthews Correlation Coefficient"},{"location":"user_guide/evaluate/scoring/#average-per-class-accuracy","text":"The \"overall\" accuracy is defined as the number of correct predictions ( true positives TP and true negatives TN) over all samples n : ACC = \\frac{TP + TN}{n} in a binary class setting: In a multi-class setting, we can generalize the computation of the accuracy as the fraction of all true predictions (the diagonal) over all samples n. ACC = \\frac{T}{n} Considering a multi-class problem with 3 classes (C0, C1, C2) let's assume our model made the following predictions: We compute the accuracy as: ACC = \\frac{3 + 50 + 18}{90} \\approx 0.79 Now, in order to compute the average per-class accuracy , we compute the binary accuracy for each class label separately; i.e., if class 1 is the positive class, class 0 and 2 are both considered the negative class. APC\\;ACC = \\frac{83/90 + 71/90 + 78/90}{3} \\approx 0.86","title":"Average Per-Class Accuracy"},{"location":"user_guide/evaluate/scoring/#references","text":"[1] S. Raschka. An overview of general performance metrics of binary classifier systems . Computing Research Repository (CoRR), abs/1410.5330, 2014. [2] Cyril Goutte and Eric Gaussier. A probabilistic interpretation of precision, recall and f-score, with implication for evaluation . In Advances in Information Retrieval, pages 345\u2013359. Springer, 2005. [3] Brian W Matthews. Comparison of the predicted and observed secondary structure of T4 phage lysozyme . 
Biochimica et Biophysica Acta (BBA) - Protein Structure, 405(2):442\u2013451, 1975.","title":"References"},{"location":"user_guide/evaluate/scoring/#example-1-classification-error","text":"from mlxtend.evaluate import scoring y_targ = [1, 1, 1, 0, 0, 2, 0, 3] y_pred = [1, 0, 1, 0, 0, 2, 1, 3] res = scoring(y_target=y_targ, y_predicted=y_pred, metric='error') print('Error: %s%%' % (res * 100)) Error: 25.0%","title":"Example 1 - Classification Error"},{"location":"user_guide/evaluate/scoring/#api","text":"scoring(y_target, y_predicted, metric='error', positive_label=1, unique_labels='auto') Compute a scoring metric for supervised learning. Parameters y_target : array-like, shape=[n_values] True class labels or target values. y_predicted : array-like, shape=[n_values] Predicted class labels or target values. metric : str (default: 'error') Performance metric: 'accuracy': (TP + TN)/(FP + FN + TP + TN) = 1-ERR 'per-class accuracy': Average per-class accuracy 'per-class error': Average per-class error 'error': (FP + FN)/(FP + FN + TP + TN) = 1-ACC 'false_positive_rate': FP/N = FP/(FP + TN) 'true_positive_rate': TP/P = TP/(FN + TP) 'true_negative_rate': TN/N = TN/(FP + TN) 'precision': TP/(TP + FP) 'recall': equal to 'true_positive_rate' 'sensitivity': equal to 'true_positive_rate' or 'recall' 'specificity': equal to 'true_negative_rate' 'f1': 2 * (PRE * REC)/(PRE + REC) 'matthews_corr_coef': (TP TN - FP FN) / (sqrt{(TP + FP)( TP + FN )( TN + FP )( TN + FN )}) Where: [TP: True positives, TN: True negatives, FP: False positives, FN: False negatives] positive_label : int (default: 1) Label of the positive class for binary classification metrics. unique_labels : str or array-like (default: 'auto') If 'auto', deduces the unique class labels from y_target Returns score : float Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/scoring/","title":"API"},{"location":"user_guide/feature_extraction/LinearDiscriminantAnalysis/","text":"Linear Discriminant Analysis Implementation of Linear Discriminant Analysis for dimensionality reduction from mlxtend.feature_extraction import LinearDiscriminantAnalysis Overview Linear Discriminant Analysis (LDA) is most commonly used as a dimensionality reduction technique in the pre-processing step for pattern-classification and machine learning applications. The goal is to project a dataset onto a lower-dimensional space with good class-separability in order to avoid overfitting (\"curse of dimensionality\") and also to reduce computational costs. Ronald A. Fisher formulated the Linear Discriminant in 1936 ( The Use of Multiple Measurements in Taxonomic Problems ), and it also has some practical uses as a classifier. The original Linear Discriminant was described for a 2-class problem, and it was then later generalized as \"multi-class Linear Discriminant Analysis\" or \"Multiple Discriminant Analysis\" by C. R. Rao in 1948 ( The utilization of multiple measurements in problems of biological classification ). The general LDA approach is very similar to a Principal Component Analysis, but in addition to finding the component axes that maximize the variance of our data (PCA), we are additionally interested in the axes that maximize the separation between multiple classes (LDA). So, in a nutshell, often the goal of an LDA is to project a feature space (a dataset of n-dimensional samples) onto a smaller subspace k (where k \\leq n-1 ) while maintaining the class-discriminatory information.
In general, dimensionality reduction does not only help to reduce computational costs for a given classification task, but it can also be helpful to avoid overfitting by minimizing the error in parameter estimation (\"curse of dimensionality\"). Summarizing the LDA approach in 5 steps Listed below are the 5 general steps for performing a linear discriminant analysis. Compute the d -dimensional mean vectors for the different classes from the dataset. Compute the scatter matrices (in-between-class and within-class scatter matrix). Compute the eigenvectors ( \\mathbf{e_1}, \\; \\mathbf{e_2}, \\; ..., \\; \\mathbf{e_d} ) and corresponding eigenvalues ( \\mathbf{\\lambda_1}, \\; \\mathbf{\\lambda_2}, \\; ..., \\; \\mathbf{\\lambda_d} ) for the scatter matrices. Sort the eigenvectors by decreasing eigenvalues and choose k eigenvectors with the largest eigenvalues to form a d \\times k -dimensional matrix \\mathbf{W} (where every column represents an eigenvector). Use this d \\times k eigenvector matrix to transform the samples onto the new subspace. This can be summarized by the mathematical equation: \\mathbf{Y} = \\mathbf{X} \\times \\mathbf{W} (where \\mathbf{X} is an n \\times d -dimensional matrix representing the n samples, and \\mathbf{Y} are the transformed n \\times k -dimensional samples in the new subspace).
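The five steps above can be prototyped in a few lines of NumPy for intuition; this is a rough sketch under simplifying assumptions (no numerical safeguards, and the helper name is made up), not mlxtend's implementation:

import numpy as np

def lda_sketch(X, y, k=2):
    n_features = X.shape[1]
    mean_overall = X.mean(axis=0)
    S_W = np.zeros((n_features, n_features))  # within-class scatter
    S_B = np.zeros((n_features, n_features))  # between-class scatter
    for label in np.unique(y):
        X_c = X[y == label]
        mean_c = X_c.mean(axis=0)                    # step 1
        S_W += (X_c - mean_c).T @ (X_c - mean_c)     # step 2
        d = (mean_c - mean_overall).reshape(-1, 1)
        S_B += X_c.shape[0] * (d @ d.T)
    # step 3: eigendecomposition of S_W^-1 S_B
    e_vals, e_vecs = np.linalg.eig(np.linalg.inv(S_W) @ S_B)
    # step 4: sort by decreasing eigenvalue, keep k eigenvectors as columns
    order = np.argsort(e_vals.real)[::-1]
    W = e_vecs.real[:, order[:k]]                    # d x k projection matrix
    return X @ W                                     # step 5: Y = X W

Applied to the standardized Iris data from Example 1 below, a sketch like this should yield discriminants equivalent to mlxtend's up to sign and scaling.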
Attributes w_ : array-like, shape=[n_features, n_discriminants] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/LinearDiscriminantAnalysis/ Methods fit(X, y, n_classes=None) Fit the LDA model with X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. Returns self : object transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_discriminants] Projected training vectors.","title":"Linear Discriminant Analysis"},{"location":"user_guide/feature_extraction/LinearDiscriminantAnalysis/#linear-discriminant-analysis","text":"Implementation of Linear Discriminant Analysis for dimensionality reduction from mlxtend.feature_extraction import LinearDiscriminantAnalysis","title":"Linear Discriminant Analysis"},{"location":"user_guide/feature_extraction/LinearDiscriminantAnalysis/#overview","text":"Linear Discriminant Analysis (LDA) is most commonly used as dimensionality reduction technique in the pre-processing step for pattern-classification and machine learning applications. The goal is to project a dataset onto a lower-dimensional space with good class-separability in order avoid overfitting (\"curse of dimensionality\") and also reduce computational costs. Ronald A. Fisher formulated the Linear Discriminant in 1936 ( The Use of Multiple Measurements in Taxonomic Problems ), and it also has some practical uses as classifier. The original Linear discriminant was described for a 2-class problem, and it was then later generalized as \"multi-class Linear Discriminant Analysis\" or \"Multiple Discriminant Analysis\" by C. R. Rao in 1948 ( The utilization of multiple measurements in problems of biological classification ) The general LDA approach is very similar to a Principal Component Analysis, but in addition to finding the component axes that maximize the variance of our data (PCA), we are additionally interested in the axes that maximize the separation between multiple classes (LDA). So, in a nutshell, often the goal of an LDA is to project a feature space (a dataset n-dimensional samples) onto a smaller subspace k (where k \\leq n-1 ) while maintaining the class-discriminatory information. In general, dimensionality reduction does not only help reducing computational costs for a given classification task, but it can also be helpful to avoid overfitting by minimizing the error in parameter estimation (\"curse of dimensionality\").","title":"Overview"},{"location":"user_guide/feature_extraction/LinearDiscriminantAnalysis/#summarizing-the-lda-approach-in-5-steps","text":"Listed below are the 5 general steps for performing a linear discriminant analysis. Compute the d -dimensional mean vectors for the different classes from the dataset. 
Compute the scatter matrices (in-between-class and within-class scatter matrix). Compute the eigenvectors ( \\mathbf{e_1}, \\; \\mathbf{e_2}, \\; ..., \\; \\mathbf{e_d} ) and corresponding eigenvalues ( \\mathbf{\\lambda_1}, \\; \\mathbf{\\lambda_2}, \\; ..., \\; \\mathbf{\\lambda_d} ) for the scatter matrices. Sort the eigenvectors by decreasing eigenvalues and choose k eigenvectors with the largest eigenvalues to form a k \\times d dimensional matrix \\mathbf{W} (where every column represents an eigenvector). Use this k \\times d eigenvector matrix to transform the samples onto the new subspace. This can be summarized by the mathematical equation: \\mathbf{Y} = \\mathbf{X} \\times \\mathbf{W} (where \\mathbf{X} is a n \\times d -dimensional matrix representing the n samples, and \\mathbf{y} are the transformed n \\times k -dimensional samples in the new subspace).","title":"Summarizing the LDA approach in 5 steps"},{"location":"user_guide/feature_extraction/LinearDiscriminantAnalysis/#references","text":"Fisher, Ronald A. \" The use of multiple measurements in taxonomic problems. \" Annals of eugenics 7.2 (1936): 179-188. Rao, C. Radhakrishna. \" The utilization of multiple measurements in problems of biological classification. \" Journal of the Royal Statistical Society. Series B (Methodological) 10.2 (1948): 159-203.","title":"References"},{"location":"user_guide/feature_extraction/LinearDiscriminantAnalysis/#example-1-lda-on-iris","text":"from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import LinearDiscriminantAnalysis X, y = iris_data() X = standardize(X) lda = LinearDiscriminantAnalysis(n_discriminants=2) lda.fit(X, y) X_lda = lda.transform(X) import matplotlib.pyplot as plt with plt.style.context('seaborn-whitegrid'): plt.figure(figsize=(6, 4)) for lab, col in zip((0, 1, 2), ('blue', 'red', 'green')): plt.scatter(X_lda[y == lab, 0], X_lda[y == lab, 1], label=lab, c=col) plt.xlabel('Linear Discriminant 1') plt.ylabel('Linear Discriminant 2') plt.legend(loc='lower right') plt.tight_layout() plt.show()","title":"Example 1 - LDA on Iris"},{"location":"user_guide/feature_extraction/LinearDiscriminantAnalysis/#example-2-plotting-the-between-class-variance-explained-ratio","text":"from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import LinearDiscriminantAnalysis X, y = iris_data() X = standardize(X) lda = LinearDiscriminantAnalysis(n_discriminants=None) lda.fit(X, y) X_lda = lda.transform(X) import numpy as np tot = sum(lda.e_vals_) var_exp = [(i / tot)*100 for i in sorted(lda.e_vals_, reverse=True)] cum_var_exp = np.cumsum(var_exp) with plt.style.context('seaborn-whitegrid'): fig, ax = plt.subplots(figsize=(6, 4)) plt.bar(range(4), var_exp, alpha=0.5, align='center', label='individual explained variance') plt.step(range(4), cum_var_exp, where='mid', label='cumulative explained variance') plt.ylabel('Explained variance ratio') plt.xlabel('Principal components') plt.xticks(range(4)) ax.set_xticklabels(np.arange(1, X.shape[1] + 1)) plt.legend(loc='best') plt.tight_layout()","title":"Example 2 - Plotting the Between-Class Variance Explained Ratio"},{"location":"user_guide/feature_extraction/LinearDiscriminantAnalysis/#api","text":"LinearDiscriminantAnalysis(n_discriminants=None) Linear Discriminant Analysis Class Parameters n_discriminants : int (default: None) The number of discrimants for transformation. Keeps the original dimensions of the dataset if None . 
Attributes w_ : array-like, shape=[n_features, n_discriminants] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/LinearDiscriminantAnalysis/","title":"API"},{"location":"user_guide/feature_extraction/LinearDiscriminantAnalysis/#methods","text":"fit(X, y, n_classes=None) Fit the LDA model with X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. Returns self : object transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_discriminants] Projected training vectors.","title":"Methods"},{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/","text":"Principal Component Analysis Implementation of Principal Component Analysis for dimensionality reduction from mlxtend.feature_extraction import PrincipalComponentAnalysis Overview The sheer size of data in the modern age is not only a challenge for computer hardware but also a main bottleneck for the performance of many machine learning algorithms. The main goal of a PCA analysis is to identify patterns in data; PCA aims to detect the correlation between variables. Attempting to reduce the dimensionality only makes sense if a strong correlation between variables exists. In a nutshell, this is what PCA is all about: finding the directions of maximum variance in high-dimensional data and projecting it onto a smaller-dimensional subspace while retaining most of the information. PCA and Dimensionality Reduction Often, the desired goal is to reduce the dimensions of a d -dimensional dataset by projecting it onto a (k) -dimensional subspace (where k\;<\;d ) in order to increase the computational efficiency while retaining most of the information. An important question is \"what is the size of k that represents the data 'well'?\" Later, we will compute eigenvectors (the principal components) of a dataset and collect them in a projection matrix. Each of those eigenvectors is associated with an eigenvalue, which can be interpreted as the \"length\" or \"magnitude\" of the corresponding eigenvector. If some eigenvalues have a significantly larger magnitude than others, then the reduction of the dataset via PCA onto a smaller-dimensional subspace by dropping the \"less informative\" eigenpairs is reasonable. A Summary of the PCA Approach Standardize the data. Obtain the Eigenvectors and Eigenvalues from the covariance matrix or correlation matrix, or perform Singular Value Decomposition. Sort eigenvalues in descending order and choose the k eigenvectors that correspond to the k largest eigenvalues, where k is the number of dimensions of the new feature subspace ( k \le d ). Construct the projection matrix \mathbf{W} from the selected k eigenvectors.
Transform the original dataset \\mathbf{X} via \\mathbf{W} to obtain a k -dimensional feature subspace \\mathbf{Y} . References Pearson, Karl. \"LIII. On lines and planes of closest fit to systems of points in space. \" The London, Edinburgh, and Dublin Philosophical Magazine and Journal of Science 2.11 (1901): 559-572. Example 1 - PCA on Iris from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import PrincipalComponentAnalysis X, y = iris_data() X = standardize(X) pca = PrincipalComponentAnalysis(n_components=2) pca.fit(X) X_pca = pca.transform(X) import matplotlib.pyplot as plt with plt.style.context('seaborn-whitegrid'): plt.figure(figsize=(6, 4)) for lab, col in zip((0, 1, 2), ('blue', 'red', 'green')): plt.scatter(X_pca[y==lab, 0], X_pca[y==lab, 1], label=lab, c=col) plt.xlabel('Principal Component 1') plt.ylabel('Principal Component 2') plt.legend(loc='lower center') plt.tight_layout() plt.show() Example 2 - Plotting the Variance Explained Ratio from mlxtend.data import iris_data from mlxtend.preprocessing import standardize X, y = iris_data() X = standardize(X) pca = PrincipalComponentAnalysis(n_components=None) pca.fit(X) X_pca = pca.transform(X) import numpy as np tot = sum(pca.e_vals_) var_exp = [(i / tot)*100 for i in sorted(pca.e_vals_, reverse=True)] cum_var_exp = np.cumsum(var_exp) with plt.style.context('seaborn-whitegrid'): fig, ax = plt.subplots(figsize=(6, 4)) plt.bar(range(4), var_exp, alpha=0.5, align='center', label='individual explained variance') plt.step(range(4), cum_var_exp, where='mid', label='cumulative explained variance') plt.ylabel('Explained variance ratio') plt.xlabel('Principal components') plt.xticks(range(4)) ax.set_xticklabels(np.arange(1, X.shape[1] + 1)) plt.legend(loc='best') plt.tight_layout() Example 3 - PCA via SVD While the eigendecomposition of the covariance or correlation matrix may be more intuitiuve, most PCA implementations perform a Singular Vector Decomposition (SVD) to improve the computational efficiency. Another advantage of using SVD is that the results tend to be more numerically stable, since we can decompose the input matrix directly without the additional covariance-matrix step. from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import PrincipalComponentAnalysis X, y = iris_data() X = standardize(X) pca = PrincipalComponentAnalysis(n_components=2, solver='svd') pca.fit(X) X_pca = pca.transform(X) import matplotlib.pyplot as plt with plt.style.context('seaborn-whitegrid'): plt.figure(figsize=(6, 4)) for lab, col in zip((0, 1, 2), ('blue', 'red', 'green')): plt.scatter(X_pca[y==lab, 0], X_pca[y==lab, 1], label=lab, c=col) plt.xlabel('Principal Component 1') plt.ylabel('Principal Component 2') plt.legend(loc='lower center') plt.tight_layout() plt.show() If we compare this PCA projection to the previous plot in example 1, we notice that they are mirror images of each other. Note that this is not due to an error in any of those two implementations, but the reason for this difference is that, depending on the eigensolver, eigenvectors can have either negative or positive signs. For instance, if v is an eigenvector of a matrix \\Sigma , we have \\Sigma v = \\lambda v, where \\lambda is our eigenvalue then -v is also an eigenvector that has the same eigenvalue, since \\Sigma(-v) = -\\Sigma v = -\\lambda v = \\lambda(-v). 
Example 4 - Factor Loadings After invoking the fit method, the factor loadings are available via the loadings_ attribute. In simple terms, the loadings are the unstandardized values of the eigenvectors. In other words, we can interpret the loadings as the covariances (or correlations, in case we standardized the input features) between the input features and the principal components (or eigenvectors), which have been scaled to unit length. Because the loadings are scaled, they are comparable in magnitude, and we can assess how much variance in a component is attributed to the input features (as the components are just a weighted linear combination of the input features). from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import PrincipalComponentAnalysis import matplotlib.pyplot as plt X, y = iris_data() X = standardize(X) pca = PrincipalComponentAnalysis(n_components=2, solver='eigen') pca.fit(X); xlabels = ['sepal length', 'sepal width', 'petal length', 'petal width'] fig, ax = plt.subplots(1, 2, figsize=(8, 3)) ax[0].bar(range(4), pca.loadings_[:, 0], align='center') ax[1].bar(range(4), pca.loadings_[:, 1], align='center') ax[0].set_ylabel('Factor loading onto PC1') ax[1].set_ylabel('Factor loading onto PC2') ax[0].set_xticks(range(4)) ax[1].set_xticks(range(4)) ax[0].set_xticklabels(xlabels, rotation=45) ax[1].set_xticklabels(xlabels, rotation=45) plt.ylim([-1, 1]) plt.tight_layout() For instance, we may say that most of the variance in the first component is attributed to the petal features (although the loading of sepal length on PC1 is also not much less in magnitude). In contrast, the remaining variance captured by PC2 is mostly due to the sepal width. Note that we know from Example 2 that PC1 explains most of the variance, and based on the information from the loading plots, we may say that petal features combined with sepal length may explain most of the spread in the data. API PrincipalComponentAnalysis(n_components=None, solver='eigen') Principal Component Analysis Class Parameters n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . solver : str (default: 'eigen') Method for performing the matrix decomposition. {'eigen', 'svd'} Attributes w_ : array-like, shape=[n_features, n_components] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. loadings_ : array_like, shape=[n_features, n_features] The factor loadings of the original variables onto the principal components. The columns are the principal components, and the rows are the feature loadings. For instance, the first column contains the loadings onto the first principal component. Note that the signs may be flipped depending on whether you use the 'eigen' or 'svd' solver; this does not affect the interpretation of the loadings though. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/PrincipalComponentAnalysis/ Methods fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object transform(X) Apply the linear transformation on X.
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"Principal Component Analysis"},{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/#principal-component-analysis","text":"Implementation of Principal Component Analysis for dimensionality reduction from mlxtend.feature_extraction import PrincipalComponentAnalysis","title":"Principal Component Analysis"},{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/#overview","text":"The sheer size of data in the modern age is not only a challenge for computer hardware but also a main bottleneck for the performance of many machine learning algorithms. The main goal of a PCA analysis is to identify patterns in data; PCA aims to detect the correlation between variables. If a strong correlation between variables exists, the attempt to reduce the dimensionality only makes sense. In a nutshell, this is what PCA is all about: Finding the directions of maximum variance in high-dimensional data and project it onto a smaller dimensional subspace while retaining most of the information.","title":"Overview"},{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/#pca-and-dimensionality-reduction","text":"Often, the desired goal is to reduce the dimensions of a d -dimensional dataset by projecting it onto a (k) -dimensional subspace (where k\\;<\\;d ) in order to increase the computational efficiency while retaining most of the information. An important question is \"what is the size of k that represents the data 'well'?\" Later, we will compute eigenvectors (the principal components) of a dataset and collect them in a projection matrix. Each of those eigenvectors is associated with an eigenvalue which can be interpreted as the \"length\" or \"magnitude\" of the corresponding eigenvector. If some eigenvalues have a significantly larger magnitude than others that the reduction of the dataset via PCA onto a smaller dimensional subspace by dropping the \"less informative\" eigenpairs is reasonable.","title":"PCA and Dimensionality Reduction"},{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/#a-summary-of-the-pca-approach","text":"Standardize the data. Obtain the Eigenvectors and Eigenvalues from the covariance matrix or correlation matrix, or perform Singular Vector Decomposition. Sort eigenvalues in descending order and choose the k eigenvectors that correspond to the k largest eigenvalues where k is the number of dimensions of the new feature subspace ( k \\le d ). Construct the projection matrix \\mathbf{W} from the selected k eigenvectors. Transform the original dataset \\mathbf{X} via \\mathbf{W} to obtain a k -dimensional feature subspace \\mathbf{Y} .","title":"A Summary of the PCA Approach"},{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/#references","text":"Pearson, Karl. \"LIII. On lines and planes of closest fit to systems of points in space. 
\" The London, Edinburgh, and Dublin Philosophical Magazine and Journal of Science 2.11 (1901): 559-572.","title":"References"},{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/#example-1-pca-on-iris","text":"from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import PrincipalComponentAnalysis X, y = iris_data() X = standardize(X) pca = PrincipalComponentAnalysis(n_components=2) pca.fit(X) X_pca = pca.transform(X) import matplotlib.pyplot as plt with plt.style.context('seaborn-whitegrid'): plt.figure(figsize=(6, 4)) for lab, col in zip((0, 1, 2), ('blue', 'red', 'green')): plt.scatter(X_pca[y==lab, 0], X_pca[y==lab, 1], label=lab, c=col) plt.xlabel('Principal Component 1') plt.ylabel('Principal Component 2') plt.legend(loc='lower center') plt.tight_layout() plt.show()","title":"Example 1 - PCA on Iris"},{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/#example-2-plotting-the-variance-explained-ratio","text":"from mlxtend.data import iris_data from mlxtend.preprocessing import standardize X, y = iris_data() X = standardize(X) pca = PrincipalComponentAnalysis(n_components=None) pca.fit(X) X_pca = pca.transform(X) import numpy as np tot = sum(pca.e_vals_) var_exp = [(i / tot)*100 for i in sorted(pca.e_vals_, reverse=True)] cum_var_exp = np.cumsum(var_exp) with plt.style.context('seaborn-whitegrid'): fig, ax = plt.subplots(figsize=(6, 4)) plt.bar(range(4), var_exp, alpha=0.5, align='center', label='individual explained variance') plt.step(range(4), cum_var_exp, where='mid', label='cumulative explained variance') plt.ylabel('Explained variance ratio') plt.xlabel('Principal components') plt.xticks(range(4)) ax.set_xticklabels(np.arange(1, X.shape[1] + 1)) plt.legend(loc='best') plt.tight_layout()","title":"Example 2 - Plotting the Variance Explained Ratio"},{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/#example-3-pca-via-svd","text":"While the eigendecomposition of the covariance or correlation matrix may be more intuitiuve, most PCA implementations perform a Singular Vector Decomposition (SVD) to improve the computational efficiency. Another advantage of using SVD is that the results tend to be more numerically stable, since we can decompose the input matrix directly without the additional covariance-matrix step. from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import PrincipalComponentAnalysis X, y = iris_data() X = standardize(X) pca = PrincipalComponentAnalysis(n_components=2, solver='svd') pca.fit(X) X_pca = pca.transform(X) import matplotlib.pyplot as plt with plt.style.context('seaborn-whitegrid'): plt.figure(figsize=(6, 4)) for lab, col in zip((0, 1, 2), ('blue', 'red', 'green')): plt.scatter(X_pca[y==lab, 0], X_pca[y==lab, 1], label=lab, c=col) plt.xlabel('Principal Component 1') plt.ylabel('Principal Component 2') plt.legend(loc='lower center') plt.tight_layout() plt.show() If we compare this PCA projection to the previous plot in example 1, we notice that they are mirror images of each other. Note that this is not due to an error in any of those two implementations, but the reason for this difference is that, depending on the eigensolver, eigenvectors can have either negative or positive signs. 
For instance, if v is an eigenvector of a matrix \\Sigma , we have \\Sigma v = \\lambda v, where \\lambda is our eigenvalue then -v is also an eigenvector that has the same eigenvalue, since \\Sigma(-v) = -\\Sigma v = -\\lambda v = \\lambda(-v).","title":"Example 3 - PCA via SVD"},{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/#example-4-factor-loadings","text":"After evoking the fit method, the factor loadings are available via the loadings_ attribute. In simple terms, the the loadings are the unstandardized values of the eigenvectors. Or in other words, we can interpret the loadings as the covariances (or correlation in case we standardized the input features) between the input features and the and the principal components (or eigenvectors), which have been scaled to unit length. By having the loadings scaled, they become comparable by magnitude and we can assess how much variance in a component is attributed to the input features (as the components are just a weighted linear combination of the input features). from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import PrincipalComponentAnalysis import matplotlib.pyplot as plt X, y = iris_data() X = standardize(X) pca = PrincipalComponentAnalysis(n_components=2, solver='eigen') pca.fit(X); xlabels = ['sepal length', 'sepal width', 'petal length', 'petal width'] fig, ax = plt.subplots(1, 2, figsize=(8, 3)) ax[0].bar(range(4), pca.loadings_[:, 0], align='center') ax[1].bar(range(4), pca.loadings_[:, 1], align='center') ax[0].set_ylabel('Factor loading onto PC1') ax[1].set_ylabel('Factor loading onto PC2') ax[0].set_xticks(range(4)) ax[1].set_xticks(range(4)) ax[0].set_xticklabels(xlabels, rotation=45) ax[1].set_xticklabels(xlabels, rotation=45) plt.ylim([-1, 1]) plt.tight_layout() For instance, we may say that most of the variance in the first component is attributed to the petal features (although the loading of sepal length on PC1 is also not much less in magnitude). In contrast, the remaining variance captured by PC2 is mostly due to the sepal width. Note that we know from Example 2 that PC1 explains most of the variance, and based on the information from the loading plots, we may say that petal features combined with sepal length may explain most of the spread in the data.","title":"Example 4 - Factor Loadings"},{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/#api","text":"PrincipalComponentAnalysis(n_components=None, solver='eigen') Principal Component Analysis Class Parameters n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . solver : str (default: 'eigen') Method for performing the matrix decomposition. {'eigen', 'svd'} Attributes w_ : array-like, shape=[n_features, n_components] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. loadings_ : array_like, shape=[n_features, n_features] The factor loadings of the original variables onto the principal components. The columns are the principal components, and the rows are the features loadings. For instance, the first column contains the loadings onto the first principal component. Note that the signs may be flipped depending on whether you use the 'eigen' or 'svd' solver; this does not affect the interpretation of the loadings though. 
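As a complement to the loadings_ description above, here is a minimal numpy sketch of how such loadings can be derived from the eigenpairs. It assumes the common convention that a loading is an eigenvector scaled by the square root of its eigenvalue (consistent with the Example 4 description, up to normalization details); it is meant for intuition and is not necessarily identical to mlxtend's internal computation:

```python
# Sketch: factor loadings as eigenvectors scaled by sqrt(eigenvalue).
# Assumes the common convention; shown for intuition only.
import numpy as np
from mlxtend.data import iris_data
from mlxtend.preprocessing import standardize

X, y = iris_data()
X_std = standardize(X)

e_vals, e_vecs = np.linalg.eigh(np.cov(X_std.T))
order = np.argsort(e_vals)[::-1]                 # decreasing eigenvalues
e_vals, e_vecs = e_vals[order], e_vecs[:, order]

# Column j holds the loadings onto principal component j; for standardized
# inputs these approximate the correlations between features and components.
loadings = e_vecs * np.sqrt(e_vals)
print(loadings.shape)                            # (4, 4)
```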
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/PrincipalComponentAnalysis/","title":"API"},{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/#methods","text":"fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"Methods"},{"location":"user_guide/feature_extraction/RBFKernelPCA/","text":"RBF Kernel Principal Component Analysis Implementation of RBF Kernel Principal Component Analysis for non-linear dimensionality reduction from mlxtend.feature_extraction import RBFKernelPCA Overview Most machine learning algorithms have been developed and statistically validated for linearly separable data. Popular examples are linear classifiers like Support Vector Machines (SVMs) or the (standard) Principal Component Analysis (PCA) for dimensionality reduction. However, most real-world data requires nonlinear methods in order to perform tasks that involve the analysis and discovery of patterns successfully. The focus of this overview is to briefly introduce the idea of kernel methods and to implement a Gaussian radial basis function (RBF) kernel that is used to perform nonlinear dimensionality reduction via RBF kernel principal component analysis (kPCA). Principal Component Analysis The main purpose of principal component analysis (PCA) is the analysis of data to identify patterns that represent the data \u201cwell.\u201d The principal components can be understood as new axes of the dataset that maximize the variance along those axes (the eigenvectors of the covariance matrix). In other words, PCA aims to find the axes with maximum variances along which the data is most spread. For more details, please see the related article on mlxtend.feature_extraction.PrincipalComponentAnalysis . Nonlinear dimensionality reduction The \u201cclassic\u201d PCA approach described above is a linear projection technique that works well if the data is linearly separable. However, in the case of linearly inseparable data, a nonlinear technique is required if the task is to reduce the dimensionality of a dataset. Kernel functions and the kernel trick The basic idea to deal with linearly inseparable data is to project it onto a higher-dimensional space where it becomes linearly separable. Let us call this nonlinear mapping function \phi , so that the mapping of a sample \mathbf{x} can be written as \mathbf{x} \rightarrow \phi (\mathbf{x}) . The term \"kernel\" then describes a function that calculates the dot product of the images of the samples \mathbf{x} under \phi .
\\kappa(\\mathbf{x_i, x_j}) = \\phi (\\mathbf{x_i}) \\phi (\\mathbf{x_j})^T More details about the derivation of this equation are provided in this excellent review article by Quan Wang: Kernel Principal Component Analysis and its Applications in Face Recognition and Active Shape Models .[ 1 ] In other words, the function \\phi maps the original d-dimensional features into a larger, k-dimensional feature space by creating nononlinear combinations of the original features. For example, if \\mathbf{x} consists of 2 features: \\mathbf{x} = \\big[x_1 \\quad x_2\\big]^T \\quad \\quad \\mathbf{x} \\in I\\!R^d \\Downarrow \\phi \\mathbf{x}' = \\big[x_1 \\quad x_2 \\quad x_1 x_2 \\quad x_{1}^2 \\quad x_1 x_{2}^3 \\quad \\dots \\big]^T \\quad \\quad \\mathbf{x} \\in I\\!R^k (k >> d) Often, the mathematical definition of the RBF kernel is written and implemented as \\kappa(\\mathbf{x_i, x_j}) = exp\\bigg(- \\gamma \\; \\lVert\\mathbf{x_i - x_j }\\rVert^{2}_{2} \\bigg) where \\textstyle\\gamma = \\tfrac{1}{2\\sigma^2} is a free parameter that is to be optimized. Gaussian radial basis function (RBF) Kernel PCA In the linear PCA approach, we are interested in the principal components that maximize the variance in the dataset. This is done by extracting the eigenvectors (principle components) that correspond to the largest eigenvalues based on the covariance matrix: \\text{Cov} = \\frac{1}{N} \\sum_{i=1}^{N} \\mathbf{x_i} \\mathbf{x_i}^T Bernhard Scholkopf ( Kernel Principal Component Analysis [ 2 ]) generalized this approach for data that was mapped onto the higher dimensional space via a kernel function: \\text{Cov} = \\frac{1}{N} \\sum_{i=1}^{N} \\phi(\\mathbf{x_i}) \\phi(\\mathbf{x_i})^T However, in practice the the covariance matrix in the higher dimensional space is not calculated explicitly (kernel trick). Therefore, the implementation of RBF kernel PCA does not yield the principal component axes (in contrast to the standard PCA), but the obtained eigenvectors can be understood as projections of the data onto the principal components. RBF kernel PCA step-by-step 1. Computation of the kernel (similarity) matrix. In this first step, we need to calculate \\kappa(\\mathbf{x_i, x_j}) = exp\\bigg(- \\gamma \\; \\lVert\\mathbf{x_i - x_j }\\rVert^{2}_{2} \\bigg) for every pair of points. E.g., if we have a dataset of 100 samples, this step would result in a symmetric 100x100 kernel matrix. 2. Eigendecomposition of the kernel matrix. Since it is not guaranteed that the kernel matrix is centered, we can apply the following equation to do so: K' = K - \\mathbf{1_N} K - K \\mathbf{1_N} + \\mathbf{1_N} K \\mathbf{1_N} where \\mathbf{1_N} is (like the kernel matrix) a N\\times N matrix with all values equal to \\frac{1}{N} . [ 3 ] Now, we have to obtain the eigenvectors of the centered kernel matrix that correspond to the largest eigenvalues. Those eigenvectors are the data points already projected onto the respective principal components. Projecting new data So far, so good, in the sections above, we have been projecting an dataset onto a new feature subspace. However, in a real application, we are usually interested in mapping new data points onto the same new feature subspace (e.g., if are working with a training and a test dataset in pattern classification tasks). Remember, when we computed the eigenvectors \\mathbf{\\alpha} of the centered kernel matrix, those values were actually already the projected datapoints onto the principal component axis \\mathbf{g} . 
If we want to project a new data point \mathbf{x} onto this principal component axis, we'd need to compute \phi(\mathbf{x})^T \mathbf{g} . Fortunately, also here, we don't have to compute \phi(\mathbf{x})^T \mathbf{g} explicitly but can use the kernel trick to calculate the RBF kernel between the new data point and every data point j in the training dataset: \phi(\mathbf{x})^T \mathbf{g} = \sum_j \alpha_{j} \; \phi(\mathbf{x}) \; \phi(\mathbf{x_j})^T = \sum_j \alpha_{j} \; \kappa(\mathbf{x}, \mathbf{x_j}) Since the eigenvectors \alpha and eigenvalues \lambda of the kernel matrix \mathbf{K} satisfy the equation \mathbf{K} \alpha = \lambda \alpha , we just need to normalize the eigenvectors by the corresponding eigenvalues. References [1] Q. Wang. Kernel principal component analysis and its applications in face recognition and active shape models . CoRR, abs/1207.3538, 2012. [2] B. Scholkopf, A. Smola, and K.-R. Muller. Kernel principal component analysis . pages 583\u2013588, 1997. [3] B. Scholkopf, A. Smola, and K.-R. Muller. Nonlinear component analysis as a kernel eigenvalue problem . Neural computation, 10(5):1299\u20131319, 1998. Example 1 - Half-moon shapes We will start with a simple example of 2 half-moon shapes generated by the make_moons function from scikit-learn. import matplotlib.pyplot as plt from sklearn.datasets import make_moons X, y = make_moons(n_samples=50, random_state=1) plt.scatter(X[y==0, 0], X[y==0, 1], color='red', marker='o', alpha=0.5) plt.scatter(X[y==1, 0], X[y==1, 1], color='blue', marker='^', alpha=0.5) plt.ylabel('y coordinate') plt.xlabel('x coordinate') plt.show() Since the two half-moon shapes are linearly inseparable, we expect that the \u201cclassic\u201d PCA will fail to give us a \u201cgood\u201d representation of the data in 1D space. Let us use the PCA class to perform the dimensionality reduction. from mlxtend.feature_extraction import PrincipalComponentAnalysis as PCA pca = PCA(n_components=2) X_pca = pca.fit(X).transform(X) plt.scatter(X_pca[y==0, 0], X_pca[y==0, 1], color='red', marker='o', alpha=0.5) plt.scatter(X_pca[y==1, 0], X_pca[y==1, 1], color='blue', marker='^', alpha=0.5) plt.xlabel('PC1') plt.ylabel('PC2') plt.show() As we can see, the resulting principal components do not yield a subspace where the data is linearly separated well. Note that PCA is an unsupervised method and, in contrast to Linear Discriminant Analysis, does not \u201cconsider\u201d class labels when maximizing the variance. Here, the colors blue and red are just added for visualization purposes to indicate the degree of separation. Next, we will perform dimensionality reduction via RBF kernel PCA on our half-moon data. The choice of \gamma depends on the dataset and can be obtained via hyperparameter tuning techniques like Grid Search. Hyperparameter tuning is a broad topic itself, and here I will just use a \gamma -value that I found to produce \u201cgood\u201d results. from mlxtend.feature_extraction import RBFKernelPCA as KPCA kpca = KPCA(gamma=15.0, n_components=2) kpca.fit(X) X_kpca = kpca.X_projected_ Please note that the components of kernel methods such as RBF kernel PCA already represent the projected data points (in contrast to PCA, where the component axes are the \"top k\" eigenvectors that are used to construct a projection matrix, which is then used to transform the training samples).
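The step-by-step recipe above translates almost line for line into code. Below is a minimal from-scratch sketch of the kernel matrix computation, centering, and eigendecomposition (an illustration of the procedure, not mlxtend's internal implementation):

```python
# Minimal from-scratch RBF kernel PCA, following the step-by-step recipe
# above. Illustration only -- not mlxtend's internal implementation.
import numpy as np
from scipy.spatial.distance import pdist, squareform

def rbf_kernel_pca_sketch(X, gamma, n_components):
    # Step 1: kernel (similarity) matrix, K[i, j] = exp(-gamma * ||x_i - x_j||^2)
    sq_dists = squareform(pdist(X, metric='sqeuclidean'))
    K = np.exp(-gamma * sq_dists)

    # Step 2: center the kernel matrix, K' = K - 1_N K - K 1_N + 1_N K 1_N,
    # then eigendecompose it; the leading eigenvectors are already the
    # projected data points (not projection axes, as noted above).
    N = K.shape[0]
    one_n = np.ones((N, N)) / N
    K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
    e_vals, e_vecs = np.linalg.eigh(K)        # eigenvalues in ascending order
    return e_vecs[:, ::-1][:, :n_components]  # top n_components projections
```

In the mlxtend class, the analogous projection is computed during fitting.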
Thus, the projected training set is available after fitting via the .X_projected_ attribute. plt.scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='o', alpha=0.5) plt.scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='^', alpha=0.5) plt.title('First 2 principal components after RBF Kernel PCA') plt.xlabel('PC1') plt.ylabel('PC2') plt.show() The new feature space is linearly separable now. Since we are often interested in dimensionality reduction, let's have a look at the first component only. import numpy as np plt.scatter(X_kpca[y==0, 0], np.zeros((25, 1)), color='red', marker='o', alpha=0.5) plt.scatter(X_kpca[y==1, 0], np.zeros((25, 1)), color='blue', marker='^', alpha=0.5) plt.title('First principal component after RBF Kernel PCA') plt.xlabel('PC1') plt.yticks([]) plt.show() We can clearly see that the projection via RBF kernel PCA yielded a subspace where the classes are separated well. Such a subspace can then be used as input for generalized linear classification models, e.g., logistic regression. Projecting new data Finally, via the transform method, we can project new data onto the new component axes. import matplotlib.pyplot as plt from sklearn.datasets import make_moons X2, y2 = make_moons(n_samples=200, random_state=5) X2_kpca = kpca.transform(X2) plt.scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='o', alpha=0.5, label='fit data') plt.scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='^', alpha=0.5, label='fit data') plt.scatter(X2_kpca[y2==0, 0], X2_kpca[y2==0, 1], color='orange', marker='v', alpha=0.2, label='new data') plt.scatter(X2_kpca[y2==1, 0], X2_kpca[y2==1, 1], color='cyan', marker='s', alpha=0.2, label='new data') plt.legend() plt.show() Example 2 - Concentric circles Following the concepts explained in example 1, let's have a look at another classic case: 2 concentric circles with random noise produced by scikit-learn\u2019s make_circles . from sklearn.datasets import make_circles X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2) plt.figure(figsize=(8,6)) plt.scatter(X[y==0, 0], X[y==0, 1], color='red', alpha=0.5) plt.scatter(X[y==1, 0], X[y==1, 1], color='blue', alpha=0.5) plt.title('Concentric circles') plt.ylabel('y coordinate') plt.xlabel('x coordinate') plt.show() from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import RBFKernelPCA as KPCA kpca = KPCA(gamma=15.0, n_components=2) kpca.fit(X) X_kpca = kpca.X_projected_ plt.scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='o', alpha=0.5) plt.scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='^', alpha=0.5) plt.title('First 2 principal components after RBF Kernel PCA') plt.xlabel('PC1') plt.ylabel('PC2') plt.show() plt.scatter(X_kpca[y==0, 0], np.zeros((500, 1)), color='red', marker='o', alpha=0.5) plt.scatter(X_kpca[y==1, 0], np.zeros((500, 1)), color='blue', marker='^', alpha=0.5) plt.title('First principal component after RBF Kernel PCA') plt.xlabel('PC1') plt.yticks([]) plt.show() API RBFKernelPCA(gamma=15.0, n_components=None, copy_X=True) RBF Kernel Principal Component Analysis for dimensionality reduction. Parameters gamma : float (default: 15.0) Free parameter (coefficient) of the RBF kernel. n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . 
copy_X : bool (default: True) Copies training data, which is required to compute the projection of new data via the transform method. Uses a reference to X if False. Attributes e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. X_projected_ : array-like, shape=[n_samples, n_components] Training samples projected along the component axes. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/RBFKernelPCA/ Methods fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object transform(X) Apply the non-linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"RBFKernelPCA"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#rbf-kernel-principal-component-analysis","text":"Implementation of RBF Kernel Principal Component Analysis for non-linear dimensionality reduction from mlxtend.feature_extraction import RBFKernelPCA","title":"RBF Kernel Principal Component Analysis"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#overview","text":"Most machine learning algorithms have been developed and statistically validated for linearly separable data. Popular examples are linear classifiers like Support Vector Machines (SVMs) or the (standard) Principal Component Analysis (PCA) for dimensionality reduction. However, most real world data requires nonlinear methods in order to perform tasks that involve the analysis and discovery of patterns successfully. The focus of this overview is to briefly introduce the idea of kernel methods and to implement a Gaussian radius basis function (RBF) kernel that is used to perform nonlinear dimensionality reduction via BF kernel principal component analysis (kPCA).","title":"Overview"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#principal-component-analysis","text":"The main purpose of principal component analysis (PCA) is the analysis of data to identify patterns that represent the data \u201cwell.\u201d The principal components can be understood as new axes of the dataset that maximize the variance along those axes (the eigenvectors of the covariance matrix). In other words, PCA aims to find the axes with maximum variances along which the data is most spread. For more details, please see the related article on mlxtend.feature_extraction.PrincipalComponentAnalysis .","title":"Principal Component Analysis"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#nonlinear-dimensionality-reduction","text":"The \u201cclassic\u201d PCA approach described above is a linear projection technique that works well if the data is linearly separable. However, in the case of linearly inseparable data, a nonlinear technique is required if the task is to reduce the dimensionality of a dataset.","title":"Nonlinear dimensionality reduction"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#kernel-functions-and-the-kernel-trick","text":"The basic idea to deal with linearly inseparable data is to project it onto a higher dimensional space where it becomes linearly separable. 
Let us call this nonlinear mapping function \\phi so that the mapping of a sample \\mathbf{x} can be written as \\mathbf{x} \\rightarrow \\phi (\\mathbf{x}) , which is called \"kernel function.\" Now, the term \"kernel\" describes a function that calculates the dot product of the images of the samples \\mathbf{x} under \\phi . \\kappa(\\mathbf{x_i, x_j}) = \\phi (\\mathbf{x_i}) \\phi (\\mathbf{x_j})^T More details about the derivation of this equation are provided in this excellent review article by Quan Wang: Kernel Principal Component Analysis and its Applications in Face Recognition and Active Shape Models .[ 1 ] In other words, the function \\phi maps the original d-dimensional features into a larger, k-dimensional feature space by creating nononlinear combinations of the original features. For example, if \\mathbf{x} consists of 2 features: \\mathbf{x} = \\big[x_1 \\quad x_2\\big]^T \\quad \\quad \\mathbf{x} \\in I\\!R^d \\Downarrow \\phi \\mathbf{x}' = \\big[x_1 \\quad x_2 \\quad x_1 x_2 \\quad x_{1}^2 \\quad x_1 x_{2}^3 \\quad \\dots \\big]^T \\quad \\quad \\mathbf{x} \\in I\\!R^k (k >> d) Often, the mathematical definition of the RBF kernel is written and implemented as \\kappa(\\mathbf{x_i, x_j}) = exp\\bigg(- \\gamma \\; \\lVert\\mathbf{x_i - x_j }\\rVert^{2}_{2} \\bigg) where \\textstyle\\gamma = \\tfrac{1}{2\\sigma^2} is a free parameter that is to be optimized.","title":"Kernel functions and the kernel trick"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#gaussian-radial-basis-function-rbf-kernel-pca","text":"In the linear PCA approach, we are interested in the principal components that maximize the variance in the dataset. This is done by extracting the eigenvectors (principle components) that correspond to the largest eigenvalues based on the covariance matrix: \\text{Cov} = \\frac{1}{N} \\sum_{i=1}^{N} \\mathbf{x_i} \\mathbf{x_i}^T Bernhard Scholkopf ( Kernel Principal Component Analysis [ 2 ]) generalized this approach for data that was mapped onto the higher dimensional space via a kernel function: \\text{Cov} = \\frac{1}{N} \\sum_{i=1}^{N} \\phi(\\mathbf{x_i}) \\phi(\\mathbf{x_i})^T However, in practice the the covariance matrix in the higher dimensional space is not calculated explicitly (kernel trick). Therefore, the implementation of RBF kernel PCA does not yield the principal component axes (in contrast to the standard PCA), but the obtained eigenvectors can be understood as projections of the data onto the principal components.","title":"Gaussian radial basis function (RBF) Kernel PCA"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#rbf-kernel-pca-step-by-step","text":"","title":"RBF kernel PCA step-by-step"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#1-computation-of-the-kernel-similarity-matrix","text":"In this first step, we need to calculate \\kappa(\\mathbf{x_i, x_j}) = exp\\bigg(- \\gamma \\; \\lVert\\mathbf{x_i - x_j }\\rVert^{2}_{2} \\bigg) for every pair of points. E.g., if we have a dataset of 100 samples, this step would result in a symmetric 100x100 kernel matrix.","title":"1. Computation of the kernel (similarity) matrix."},{"location":"user_guide/feature_extraction/RBFKernelPCA/#2-eigendecomposition-of-the-kernel-matrix","text":"Since it is not guaranteed that the kernel matrix is centered, we can apply the following equation to do so: K' = K - \\mathbf{1_N} K - K \\mathbf{1_N} + \\mathbf{1_N} K \\mathbf{1_N} where \\mathbf{1_N} is (like the kernel matrix) a N\\times N matrix with all values equal to \\frac{1}{N} . 
[ 3 ] Now, we have to obtain the eigenvectors of the centered kernel matrix that correspond to the largest eigenvalues. Those eigenvectors are the data points already projected onto the respective principal components.","title":"2. Eigendecomposition of the kernel matrix."},{"location":"user_guide/feature_extraction/RBFKernelPCA/#projecting-new-data","text":"So far, so good, in the sections above, we have been projecting an dataset onto a new feature subspace. However, in a real application, we are usually interested in mapping new data points onto the same new feature subspace (e.g., if are working with a training and a test dataset in pattern classification tasks). Remember, when we computed the eigenvectors \\mathbf{\\alpha} of the centered kernel matrix, those values were actually already the projected datapoints onto the principal component axis \\mathbf{g} . If we want to project a new data point \\mathbf{x} onto this principal component axis, we'd need to compute \\phi(\\mathbf{x})^T \\mathbf{g} . Fortunately, also here, we don't have to compute \\phi(\\mathbf{x})^T \\mathbf{g} explicitely but use the kernel trick to calculate the RBF kernel between the new data point and every data point j in the training dataset: \\phi(\\mathbf{x})^T \\mathbf{g} = \\sum_j \\alpha_{i} \\; \\phi(\\mathbf{x}) \\; \\phi(\\mathbf{x_j})^T = \\sum_j \\alpha_{i} \\; \\kappa(\\mathbf{x}, \\mathbf{x_j}) and the eigenvectors \\alpha and eigenvalues \\lambda of the Kernel matrix \\mathbf{K} satisfy the equation \\mathbf{K} \\alpha = \\lambda \\alpha , we just need to normalize the eigenvector by the corresponding eigenvalue.","title":"Projecting new data"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#references","text":"[1] Q. Wang. Kernel principal component analysis and its applications in face recognition and active shape models . CoRR, abs/1207.3538, 2012. [2] B. Scholkopf, A. Smola, and K.-R. Muller. Kernel principal component analysis . pages 583\u2013588, 1997. [3] B. Scholkopf, A. Smola, and K.-R. Muller. Nonlinear component analysis as a kernel eigenvalue problem . Neural computation, 10(5):1299\u20131319, 1998.","title":"References"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#example-1-half-moon-shapes","text":"We will start with a simple example of 2 half-moon shapes generated by the make_moons function from scikit-learn. import matplotlib.pyplot as plt from sklearn.datasets import make_moons X, y = make_moons(n_samples=50, random_state=1) plt.scatter(X[y==0, 0], X[y==0, 1], color='red', marker='o', alpha=0.5) plt.scatter(X[y==1, 0], X[y==1, 1], color='blue', marker='^', alpha=0.5) plt.ylabel('y coordinate') plt.xlabel('x coordinate') plt.show() Since the two half-moon shapes are linearly inseparable, we expect that the \u201cclassic\u201d PCA will fail to give us a \u201cgood\u201d representation of the data in 1D space. Let us use PCA class to perform the dimensionality reduction. from mlxtend.feature_extraction import PrincipalComponentAnalysis as PCA pca = PCA(n_components=2) X_pca = pca.fit(X).transform(X) plt.scatter(X_pca[y==0, 0], X_pca[y==0, 1], color='red', marker='o', alpha=0.5) plt.scatter(X_pca[y==1, 0], X_pca[y==1, 1], color='blue', marker='^', alpha=0.5) plt.xlabel('PC1') plt.ylabel('PC2') plt.show() As we can see, the resulting principal components do not yield a subspace where the data is linearly separated well. 
Note that PCA is a unsupervised method and does not \u201cconsider\u201d class labels in order to maximize the variance in contrast to Linear Discriminant Analysis. Here, the colors blue and red are just added for visualization purposes to indicate the degree of separation. Next, we will perform dimensionality reduction via RBF kernel PCA on our half-moon data. The choice of \\gamma depends on the dataset and can be obtained via hyperparameter tuning techniques like Grid Search. Hyperparameter tuning is a broad topic itself, and here I will just use a \\gamma -value that I found to produce \u201cgood\u201d results. from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import RBFKernelPCA as KPCA kpca = KPCA(gamma=15.0, n_components=2) kpca.fit(X) X_kpca = kpca.X_projected_ Please note that the components of kernel methods such as RBF kernel PCA already represent the projected data points (in contrast to PCA, where the component axis are the \"top k\" eigenvectors thar are used to contruct a projection matrix, which is then used to transform the training samples). Thus, the projected training set is available after fitting via the .X_projected_ attribute. plt.scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='o', alpha=0.5) plt.scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='^', alpha=0.5) plt.title('First 2 principal components after RBF Kernel PCA') plt.xlabel('PC1') plt.ylabel('PC2') plt.show() The new feature space is linearly separable now. Since we are often interested in dimensionality reduction, let's have a look at the first component only. import numpy as np plt.scatter(X_kpca[y==0, 0], np.zeros((25, 1)), color='red', marker='o', alpha=0.5) plt.scatter(X_kpca[y==1, 0], np.zeros((25, 1)), color='blue', marker='^', alpha=0.5) plt.title('First principal component after RBF Kernel PCA') plt.xlabel('PC1') plt.yticks([]) plt.show() We can clearly see that the projection via RBF kernel PCA yielded a subspace where the classes are separated well. Such a subspace can then be used as input for generalized linear classification models, e.g., logistic regression.","title":"Example 1 - Half-moon shapes"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#projecting-new-data_1","text":"Finally, via the transform method, we can project new data onto the new component axes. import matplotlib.pyplot as plt from sklearn.datasets import make_moons X2, y2 = make_moons(n_samples=200, random_state=5) X2_kpca = kpca.transform(X2) plt.scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='o', alpha=0.5, label='fit data') plt.scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='^', alpha=0.5, label='fit data') plt.scatter(X2_kpca[y2==0, 0], X2_kpca[y2==0, 1], color='orange', marker='v', alpha=0.2, label='new data') plt.scatter(X2_kpca[y2==1, 0], X2_kpca[y2==1, 1], color='cyan', marker='s', alpha=0.2, label='new data') plt.legend() plt.show()","title":"Projecting new data"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#example-2-concentric-circles","text":"Following the concepts explained in example 1, let's have a look at another classic case: 2 concentric circles with random noise produced by scikit-learn\u2019s make_circles . 
from sklearn.datasets import make_circles X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2) plt.figure(figsize=(8,6)) plt.scatter(X[y==0, 0], X[y==0, 1], color='red', alpha=0.5) plt.scatter(X[y==1, 0], X[y==1, 1], color='blue', alpha=0.5) plt.title('Concentric circles') plt.ylabel('y coordinate') plt.xlabel('x coordinate') plt.show() from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import RBFKernelPCA as KPCA kpca = KPCA(gamma=15.0, n_components=2) kpca.fit(X) X_kpca = kpca.X_projected_ plt.scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='o', alpha=0.5) plt.scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='^', alpha=0.5) plt.title('First 2 principal components after RBF Kernel PCA') plt.xlabel('PC1') plt.ylabel('PC2') plt.show() plt.scatter(X_kpca[y==0, 0], np.zeros((500, 1)), color='red', marker='o', alpha=0.5) plt.scatter(X_kpca[y==1, 0], np.zeros((500, 1)), color='blue', marker='^', alpha=0.5) plt.title('First principal component after RBF Kernel PCA') plt.xlabel('PC1') plt.yticks([]) plt.show()","title":"Example 2 - Concentric circles"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#api","text":"RBFKernelPCA(gamma=15.0, n_components=None, copy_X=True) RBF Kernel Principal Component Analysis for dimensionality reduction. Parameters gamma : float (default: 15.0) Free parameter (coefficient) of the RBF kernel. n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . copy_X : bool (default: True) Copies training data, which is required to compute the projection of new data via the transform method. Uses a reference to X if False. Attributes e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. X_projected_ : array-like, shape=[n_samples, n_components] Training samples projected along the component axes. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/RBFKernelPCA/","title":"API"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#methods","text":"fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object transform(X) Apply the non-linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"Methods"},{"location":"user_guide/feature_selection/ColumnSelector/","text":"ColumnSelector Implementation of a column selector class for scikit-learn pipelines. from mlxtend.feature_selection import ColumnSelector Overview The ColumnSelector can be used for \"manual\" feature selection, e.g., as part of a grid search via a scikit-learn pipeline. References - Example 1 - Fitting an Estimator on a Feature Subset Load a simple benchmark dataset: from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target The ColumnSelector is a simple transformer class that selects specific columns (features) from a datast. 
For instance, using the transform method returns a reduced dataset that only contains two features (here: the first two features via the indices 0 and 1, respectively): from mlxtend.feature_selection import ColumnSelector col_selector = ColumnSelector(cols=(0, 1)) # col_selector.fit(X) # optional, does not do anything col_selector.transform(X).shape (150, 2) ColumnSelector works both with numpy arrays and pandas dataframes: import pandas as pd iris_df = pd.DataFrame(iris.data, columns=iris.feature_names) iris_df.head() sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) 0 5.1 3.5 1.4 0.2 1 4.9 3.0 1.4 0.2 2 4.7 3.2 1.3 0.2 3 4.6 3.1 1.5 0.2 4 5.0 3.6 1.4 0.2 col_selector = ColumnSelector(cols=(\"sepal length (cm)\", \"sepal width (cm)\")) col_selector.transform(iris_df).shape (150, 2) Similarly, we can use the ColumnSelector as part of a scikit-learn Pipeline : from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.pipeline import make_pipeline pipe = make_pipeline(StandardScaler(), ColumnSelector(cols=(0, 1)), KNeighborsClassifier()) pipe.fit(X, y) pipe.score(X, y) 0.83999999999999997 Example 2 - Feature Selection via GridSearch Example 1 showed a simple usage example of the ColumnSelector ; however, selecting columns from a dataset is trivial and does not require a specific transformer class since we could have achieved the same results via classifier.fit(X[:, :2], y) classifier.score(X[:, :2], y) However, the ColumnSelector becomes really useful for feature selection as part of a grid search as shown in this example. Load a simple benchmark dataset: from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target Create all possible combinations: from itertools import combinations all_comb = [] for size in range(1, 5): all_comb += list(combinations(range(X.shape[1]), r=size)) print(all_comb) [(0,), (1,), (2,), (3,), (0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3), (0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3), (0, 1, 2, 3)] Feature and model selection via grid search: from mlxtend.feature_selection import ColumnSelector from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.model_selection import GridSearchCV from sklearn.pipeline import make_pipeline pipe = make_pipeline(StandardScaler(), ColumnSelector(), KNeighborsClassifier()) param_grid = {'columnselector__cols': all_comb, 'kneighborsclassifier__n_neighbors': list(range(1, 11))} grid = GridSearchCV(pipe, param_grid, cv=5, n_jobs=-1) grid.fit(X, y) print('Best parameters:', grid.best_params_) print('Best performance:', grid.best_score_) Best parameters: {'columnselector__cols': (2, 3), 'kneighborsclassifier__n_neighbors': 1} Best performance: 0.98 API ColumnSelector(cols=None, drop_axis=False) Object for selecting specific columns from a data set. Parameters cols : array-like (default: None) A list specifying the feature indices to be selected. For example, [1, 4, 5] to select the 2nd, 5th, and 6th feature columns. If None, returns all columns in the array. drop_axis : bool (default=False) Drops last axis if True and only one column is selected. This is useful, e.g., when the ColumnSelector is used for selecting only one column and the resulting array should be fed to e.g., a scikit-learn column selector.
E.g., instead of returning an array with shape (n_samples, 1), drop_axis=True will return an array with shape (n_samples,). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ColumnSelector/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features","title":"ColumnSelector"},{"location":"user_guide/feature_selection/ColumnSelector/#columnselector","text":"Implementation of a column selector class for scikit-learn pipelines. from mlxtend.feature_selection import ColumnSelector","title":"ColumnSelector"},{"location":"user_guide/feature_selection/ColumnSelector/#overview","text":"The ColumnSelector can be used for \"manual\" feature selection, e.g., as part of a grid search via a scikit-learn pipeline.","title":"Overview"},{"location":"user_guide/feature_selection/ColumnSelector/#references","text":"-","title":"References"},{"location":"user_guide/feature_selection/ColumnSelector/#example-1-fitting-an-estimator-on-a-feature-subset","text":"Load a simple benchmark dataset: from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target The ColumnSelector is a simple transformer class that selects specific columns (features) from a dataset.
For instance, using the transform method returns a reduced dataset that only contains two features (here: the first two features via the indices 0 and 1, respectively): from mlxtend.feature_selection import ColumnSelector col_selector = ColumnSelector(cols=(0, 1)) # col_selector.fit(X) # optional, does not do anything col_selector.transform(X).shape (150, 2) ColumnSelector works both with numpy arrays and pandas dataframes: import pandas as pd iris_df = pd.DataFrame(iris.data, columns=iris.feature_names) iris_df.head() sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) 0 5.1 3.5 1.4 0.2 1 4.9 3.0 1.4 0.2 2 4.7 3.2 1.3 0.2 3 4.6 3.1 1.5 0.2 4 5.0 3.6 1.4 0.2 col_selector = ColumnSelector(cols=(\"sepal length (cm)\", \"sepal width (cm)\")) col_selector.transform(iris_df).shape (150, 2) Similarly, we can use the ColumnSelector as part of a scikit-learn Pipeline : from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.pipeline import make_pipeline pipe = make_pipeline(StandardScaler(), ColumnSelector(cols=(0, 1)), KNeighborsClassifier()) pipe.fit(X, y) pipe.score(X, y) 0.83999999999999997","title":"Example 1 - Fitting an Estimator on a Feature Subset"},{"location":"user_guide/feature_selection/ColumnSelector/#example-2-feature-selection-via-gridsearch","text":"Example 1 showed a simple usage example of the ColumnSelector ; however, selecting columns from a dataset is trivial and does not require a specific transformer class since we could have achieved the same results via classifier.fit(X[:, :2], y) classifier.score(X[:, :2], y) However, the ColumnSelector becomes really useful for feature selection as part of a grid search as shown in this example. Load a simple benchmark dataset: from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target Create all possible combinations: from itertools import combinations all_comb = [] for size in range(1, 5): all_comb += list(combinations(range(X.shape[1]), r=size)) print(all_comb) [(0,), (1,), (2,), (3,), (0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3), (0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3), (0, 1, 2, 3)] Feature and model selection via grid search: from mlxtend.feature_selection import ColumnSelector from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.model_selection import GridSearchCV from sklearn.pipeline import make_pipeline pipe = make_pipeline(StandardScaler(), ColumnSelector(), KNeighborsClassifier()) param_grid = {'columnselector__cols': all_comb, 'kneighborsclassifier__n_neighbors': list(range(1, 11))} grid = GridSearchCV(pipe, param_grid, cv=5, n_jobs=-1) grid.fit(X, y) print('Best parameters:', grid.best_params_) print('Best performance:', grid.best_score_) Best parameters: {'columnselector__cols': (2, 3), 'kneighborsclassifier__n_neighbors': 1} Best performance: 0.98","title":"Example 2 - Feature Selection via GridSearch"},{"location":"user_guide/feature_selection/ColumnSelector/#api","text":"ColumnSelector(cols=None, drop_axis=False) Object for selecting specific columns from a data set. Parameters cols : array-like (default: None) A list specifying the feature indices to be selected. For example, [1, 4, 5] to select the 2nd, 5th, and 6th feature columns. If None, returns all columns in the array.
drop_axis : bool (default=False) Drops last axis if True and only one column is selected. This is useful, e.g., when the ColumnSelector is used for selecting only one column and the resulting array should be fed to e.g., a scikit-learn column selector. E.g., instead of returning an array with shape (n_samples, 1), drop_axis=True will return an array with shape (n_samples,). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ColumnSelector/","title":"API"},{"location":"user_guide/feature_selection/ColumnSelector/#methods","text":"fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features","title":"Methods"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/","text":"Exhaustive Feature Selector Implementation of an exhaustive feature selector for sampling and evaluating all possible feature combinations in a specified range. from mlxtend.feature_selection import ExhaustiveFeatureSelector Overview This exhaustive feature selection algorithm is a wrapper approach for brute-force evaluation of feature subsets; the best subset is selected by optimizing a specified performance metric given an arbitrary regressor or classifier. For instance, if the classifier is a logistic regression and the dataset consists of 4 features, the algorithm will evaluate all 15 feature combinations (if min_features=1 and max_features=4 ) {0} {1} {2} {3} {0, 1} {0, 2} {0, 3} {1, 2} {1, 3} {2, 3} {0, 1, 2} {0, 1, 3} {0, 2, 3} {1, 2, 3} {0, 1, 2, 3} and select the one that results in the best performance (e.g., classification accuracy) of the logistic regression classifier.
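To make the size of this brute-force search concrete, here is a minimal sketch (not part of the original example; it uses only itertools, not mlxtend internals) that enumerates the candidate subsets an exhaustive search over min_features=1 and max_features=4 would score for d=4 features:

from itertools import combinations

d, min_features, max_features = 4, 1, 4
# Enumerate every feature-index tuple that the exhaustive search evaluates.
subsets = [c
           for size in range(min_features, max_features + 1)
           for c in combinations(range(d), size)]
print(len(subsets))  # 15 candidate subsets, matching the list above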
Example 1 - A simple Iris Example Initializing a simple classifier from scikit-learn: from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=3) efs1 = EFS(knn, min_features=1, max_features=4, scoring='accuracy', print_progress=True, cv=5) efs1 = efs1.fit(X, y) print('Best accuracy score: %.2f' % efs1.best_score_) print('Best subset (indices):', efs1.best_idx_) print('Best subset (corresponding names):', efs1.best_feature_names_) Features: 15/15 Best accuracy score: 0.97 Best subset (indices): (0, 2, 3) Best subset (corresponding names): ('0', '2', '3') Note that in the example above, the 'best_feature_names_' are simply a string equivalent of the feature indices. However, we can provide custom feature names to the fit function for this mapping: feature_names = ('sepal length', 'sepal width', 'petal length', 'petal width') efs1 = efs1.fit(X, y, custom_feature_names=feature_names) print('Best subset (corresponding names):', efs1.best_feature_names_) Features: 15/15 Best subset (corresponding names): ('sepal length', 'petal length', 'petal width') Via the subsets_ attribute, we can take a look at the selected feature indices at each step: efs1.subsets_ {0: {'avg_score': 0.65999999999999992, 'cv_scores': array([ 0.53333333, 0.63333333, 0.73333333, 0.76666667, 0.63333333]), 'feature_idx': (0,), 'feature_names': ('sepal length',)}, 1: {'avg_score': 0.56666666666666665, 'cv_scores': array([ 0.53333333, 0.63333333, 0.6 , 0.5 , 0.56666667]), 'feature_idx': (1,), 'feature_names': ('sepal width',)}, 2: {'avg_score': 0.95333333333333337, 'cv_scores': array([ 0.93333333, 1. , 0.9 , 0.93333333, 1. ]), 'feature_idx': (2,), 'feature_names': ('petal length',)}, 3: {'avg_score': 0.94666666666666666, 'cv_scores': array([ 0.96666667, 0.96666667, 0.93333333, 0.86666667, 1. ]), 'feature_idx': (3,), 'feature_names': ('petal width',)}, 4: {'avg_score': 0.72666666666666668, 'cv_scores': array([ 0.66666667, 0.8 , 0.63333333, 0.86666667, 0.66666667]), 'feature_idx': (0, 1), 'feature_names': ('sepal length', 'sepal width')}, 5: {'avg_score': 0.94666666666666666, 'cv_scores': array([ 0.96666667, 1. , 0.86666667, 0.93333333, 0.96666667]), 'feature_idx': (0, 2), 'feature_names': ('sepal length', 'petal length')}, 6: {'avg_score': 0.95333333333333337, 'cv_scores': array([ 0.96666667, 0.96666667, 0.9 , 0.93333333, 1. ]), 'feature_idx': (0, 3), 'feature_names': ('sepal length', 'petal width')}, 7: {'avg_score': 0.94666666666666666, 'cv_scores': array([ 0.96666667, 1. , 0.9 , 0.93333333, 0.93333333]), 'feature_idx': (1, 2), 'feature_names': ('sepal width', 'petal length')}, 8: {'avg_score': 0.94000000000000006, 'cv_scores': array([ 0.96666667, 0.96666667, 0.86666667, 0.93333333, 0.96666667]), 'feature_idx': (1, 3), 'feature_names': ('sepal width', 'petal width')}, 9: {'avg_score': 0.95333333333333337, 'cv_scores': array([ 0.96666667, 0.96666667, 0.9 , 0.93333333, 1. ]), 'feature_idx': (2, 3), 'feature_names': ('petal length', 'petal width')}, 10: {'avg_score': 0.94000000000000006, 'cv_scores': array([ 0.96666667, 0.96666667, 0.86666667, 0.93333333, 0.96666667]), 'feature_idx': (0, 1, 2), 'feature_names': ('sepal length', 'sepal width', 'petal length')}, 11: {'avg_score': 0.94666666666666666, 'cv_scores': array([ 0.93333333, 0.96666667, 0.9 , 0.93333333, 1. 
]), 'feature_idx': (0, 1, 3), 'feature_names': ('sepal length', 'sepal width', 'petal width')}, 12: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.96666667, 0.96666667, 0.96666667, 0.96666667, 1. ]), 'feature_idx': (0, 2, 3), 'feature_names': ('sepal length', 'petal length', 'petal width')}, 13: {'avg_score': 0.95999999999999996, 'cv_scores': array([ 0.96666667, 0.96666667, 0.93333333, 0.93333333, 1. ]), 'feature_idx': (1, 2, 3), 'feature_names': ('sepal width', 'petal length', 'petal width')}, 14: {'avg_score': 0.96666666666666679, 'cv_scores': array([ 0.96666667, 0.96666667, 0.93333333, 0.96666667, 1. ]), 'feature_idx': (0, 1, 2, 3), 'feature_names': ('sepal length', 'sepal width', 'petal length', 'petal width')}} Example 2 - Visualizing the feature selection results For our convenience, we can visualize the output from the feature selection in a pandas DataFrame format using the get_metric_dict method of the ExhaustiveFeatureSelector object. The columns std_dev and std_err represent the standard deviation and standard errors of the cross-validation scores, respectively. Below, we see the DataFrame for the Exhaustive Feature Selector from Example 1: import pandas as pd iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=3) efs1 = EFS(knn, min_features=1, max_features=4, scoring='accuracy', print_progress=True, cv=5) feature_names = ('sepal length', 'sepal width', 'petal length', 'petal width') efs1 = efs1.fit(X, y, custom_feature_names=feature_names) df = pd.DataFrame.from_dict(efs1.get_metric_dict()).T df.sort_values('avg_score', inplace=True, ascending=False) df Features: 15/15 avg_score ci_bound cv_scores feature_idx feature_names std_dev std_err 12 0.973333 0.0171372 [0.966666666667, 0.966666666667, 0.96666666666... (0, 2, 3) (sepal length, petal length, petal width) 0.0133333 0.00666667 14 0.966667 0.0270963 [0.966666666667, 0.966666666667, 0.93333333333... (0, 1, 2, 3) (sepal length, sepal width, petal length, peta... 0.0210819 0.0105409 13 0.96 0.0320608 [0.966666666667, 0.966666666667, 0.93333333333... (1, 2, 3) (sepal width, petal length, petal width) 0.0249444 0.0124722 2 0.953333 0.0514116 [0.933333333333, 1.0, 0.9, 0.933333333333, 1.0] (2,) (petal length,) 0.04 0.02 6 0.953333 0.0436915 [0.966666666667, 0.966666666667, 0.9, 0.933333... (0, 3) (sepal length, petal width) 0.0339935 0.0169967 9 0.953333 0.0436915 [0.966666666667, 0.966666666667, 0.9, 0.933333... (2, 3) (petal length, petal width) 0.0339935 0.0169967 3 0.946667 0.0581151 [0.966666666667, 0.966666666667, 0.93333333333... (3,) (petal width,) 0.0452155 0.0226078 5 0.946667 0.0581151 [0.966666666667, 1.0, 0.866666666667, 0.933333... (0, 2) (sepal length, petal length) 0.0452155 0.0226078 7 0.946667 0.0436915 [0.966666666667, 1.0, 0.9, 0.933333333333, 0.9... (1, 2) (sepal width, petal length) 0.0339935 0.0169967 11 0.946667 0.0436915 [0.933333333333, 0.966666666667, 0.9, 0.933333... (0, 1, 3) (sepal length, sepal width, petal width) 0.0339935 0.0169967 8 0.94 0.0499631 [0.966666666667, 0.966666666667, 0.86666666666... (1, 3) (sepal width, petal width) 0.038873 0.0194365 10 0.94 0.0499631 [0.966666666667, 0.966666666667, 0.86666666666... (0, 1, 2) (sepal length, sepal width, petal length) 0.038873 0.0194365 4 0.726667 0.11623 [0.666666666667, 0.8, 0.633333333333, 0.866666...
(0, 1) (sepal length, sepal width) 0.0904311 0.0452155 0 0.66 0.106334 [0.533333333333, 0.633333333333, 0.73333333333... (0,) (sepal length,) 0.0827312 0.0413656 1 0.566667 0.0605892 [0.533333333333, 0.633333333333, 0.6, 0.5, 0.5... (1,) (sepal width,) 0.0471405 0.0235702 import matplotlib.pyplot as plt metric_dict = efs1.get_metric_dict() fig = plt.figure() k_feat = sorted(metric_dict.keys()) avg = [metric_dict[k]['avg_score'] for k in k_feat] upper, lower = [], [] for k in k_feat: upper.append(metric_dict[k]['avg_score'] + metric_dict[k]['std_dev']) lower.append(metric_dict[k]['avg_score'] - metric_dict[k]['std_dev']) plt.fill_between(k_feat, upper, lower, alpha=0.2, color='blue', lw=1) plt.plot(k_feat, avg, color='blue', marker='o') plt.ylabel('Accuracy +/- Standard Deviation') plt.xlabel('Number of Features') feature_min = len(metric_dict[k_feat[0]]['feature_idx']) feature_max = len(metric_dict[k_feat[-1]]['feature_idx']) plt.xticks(k_feat, [str(metric_dict[k]['feature_names']) for k in k_feat], rotation=90) plt.show() Example 3 - Exhaustive Feature Selection for Regression Similar to the classification examples above, the ExhaustiveFeatureSelector also supports scikit-learn's estimators for regression. from sklearn.linear_model import LinearRegression from sklearn.datasets import load_boston boston = load_boston() X, y = boston.data, boston.target lr = LinearRegression() efs = EFS(lr, min_features=10, max_features=12, scoring='neg_mean_squared_error', cv=10) efs.fit(X, y) print('Best MSE score: %.2f' % (efs.best_score_ * (-1))) print('Best subset:', efs.best_idx_) Features: 377/377 Best subset: (0, 1, 4, 6, 7, 8, 9, 10, 11, 12) Example 4 - Using the Selected Feature Subset For Making New Predictions # Initialize the dataset from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=1) knn = KNeighborsClassifier(n_neighbors=3) # Select the \"best\" feature subset via # 5-fold cross-validation on the training set. from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS efs1 = EFS(knn, min_features=1, max_features=4, scoring='accuracy', cv=5) efs1 = efs1.fit(X_train, y_train) Features: 15/15 print('Selected features:', efs1.best_idx_) Selected features: (2, 3) # Generate the new subsets based on the selected features # Note that the transform call is equivalent to # X_train[:, efs1.best_idx_] X_train_efs = efs1.transform(X_train) X_test_efs = efs1.transform(X_test) # Fit the estimator using the new feature subset # and make a prediction on the test data knn.fit(X_train_efs, y_train) y_pred = knn.predict(X_test_efs) # Compute the accuracy of the prediction acc = float((y_test == y_pred).sum()) / y_pred.shape[0] print('Test set accuracy: %.2f %%' % (acc*100)) Test set accuracy: 96.00 % Example 5 - Exhaustive Feature Selection and GridSearch # Initialize the dataset from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=1) Use scikit-learn's GridSearch to tune the hyperparameters of the LogisticRegression estimator inside the ExhaustiveFeatureSelector and use it for prediction in the pipeline. Note that the clone_estimator attribute needs to be set to False .
from sklearn.model_selection import GridSearchCV from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS lr = LogisticRegression(multi_class='multinomial', solver='lbfgs', random_state=123) efs1 = EFS(estimator=lr, min_features=2, max_features=3, scoring='accuracy', print_progress=False, clone_estimator=False, cv=5, n_jobs=1) pipe = make_pipeline(efs1, lr) param_grid = {'exhaustivefeatureselector__estimator__C': [0.1, 1.0, 10.0]} gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', n_jobs=1, cv=2, verbose=1, refit=False) # run grid search gs = gs.fit(X_train, y_train) Fitting 2 folds for each of 3 candidates, totalling 6 fits [Parallel(n_jobs=1)]: Done 6 out of 6 | elapsed: 2.7s finished ... and the \"best\" parameters determined by GridSearch are ... print(\"Best parameters via GridSearch\", gs.best_params_) Best parameters via GridSearch {'exhaustivefeatureselector__estimator__C': 1.0} Obtaining the best k feature indices after GridSearch If we are interested in the best k feature indices via ExhaustiveFeatureSelector.best_idx_ , we have to initialize a GridSearchCV object with refit=True . Now, the grid search object will take the complete training dataset and the best parameters, which it found via cross-validation, to train the estimator pipeline. gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', n_jobs=1, cv=2, verbose=1, refit=True) After running the grid search, we can access the individual pipeline objects of the best_estimator_ via the steps attribute. gs = gs.fit(X_train, y_train) gs.best_estimator_.steps Fitting 2 folds for each of 3 candidates, totalling 6 fits [Parallel(n_jobs=1)]: Done 6 out of 6 | elapsed: 2.9s finished [('exhaustivefeatureselector', ExhaustiveFeatureSelector(clone_estimator=False, cv=5, estimator=LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='multinomial', n_jobs=1, penalty='l2', random_state=123, solver='lbfgs', tol=0.0001, verbose=0, warm_start=False), max_features=3, min_features=2, n_jobs=1, pre_dispatch='2*n_jobs', print_progress=False, scoring='accuracy')), ('logisticregression', LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='multinomial', n_jobs=1, penalty='l2', random_state=123, solver='lbfgs', tol=0.0001, verbose=0, warm_start=False))] Via sub-indexing, we can then obtain the best-selected feature subset: print('Best features:', gs.best_estimator_.steps[0][1].best_idx_) Best features: (2, 3) During cross-validation, this feature combination had a CV accuracy of: print('Best score:', gs.best_score_) Best score: 0.97 gs.best_params_ {'exhaustivefeatureselector__estimator__C': 1.0} Alternatively, we can set the \"best grid search parameters\" in our pipeline manually if we ran GridSearchCV with refit=False . It should yield the same results: pipe.set_params(**gs.best_params_).fit(X_train, y_train) print('Best features:', pipe.steps[0][1].best_idx_) Best features: (2, 3) Example 6 - Working with pandas DataFrames Optionally, we can also use pandas DataFrames and pandas Series as input to the fit function. In this case, the column names of the pandas DataFrame will be used as feature names.
However, note that if custom_feature_names are provided in the fit function, these custom_feature_names take precedence over the DataFrame column-based feature names. import pandas as pd from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris iris = load_iris() col_names = ('sepal length', 'sepal width', 'petal length', 'petal width') X_df = pd.DataFrame(iris.data, columns=col_names) y_series = pd.Series(iris.target) from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS knn = KNeighborsClassifier(n_neighbors=3) efs1 = EFS(knn, min_features=1, max_features=4, scoring='accuracy', print_progress=True, cv=5) efs1 = efs1.fit(X_df, y_series) print('Best accuracy score: %.2f' % efs1.best_score_) print('Best subset (indices):', efs1.best_idx_) print('Best subset (corresponding names):', efs1.best_feature_names_) Features: 15/15 Best accuracy score: 0.97 Best subset (indices): (0, 2, 3) Best subset (corresponding names): ('sepal length', 'petal length', 'petal width') API ExhaustiveFeatureSelector(estimator, min_features=1, max_features=1, print_progress=True, scoring='accuracy', cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Exhaustive Feature Selection for Classification and Regression. (new in v0.4.3) Parameters estimator : scikit-learn classifier or regressor min_features : int (default: 1) Minimum number of features to select max_features : int (default: 1) Maximum number of features to select print_progress : bool (default: True) Prints progress as the number of evaluated feature combinations to stderr. scoring : str, (default='accuracy') Scoring metric in {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} for regressors, or a callable object or function with signature scorer(estimator, X, y) . cv : int (default: 5) Scikit-learn cross-validation generator or int . If estimator is a classifier (or y consists of integer class labels), stratified k-fold is performed, and regular k-fold cross-validation otherwise. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1. Attributes best_idx_ : array-like, shape = [n_predictions] Feature Indices of the selected feature subsets. best_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names.
Otherwise, the feature names are string representations of the feature array indices. New in v 0.13.0. best_score_ : float Cross-validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the exhaustive selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. The 'feature_names' is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ExhaustiveFeatureSelector/","title":"API"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#methods","text":"fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data and return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns Feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': average of the CV scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.
Returns self transform(X) Return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Feature subset of X, shape={n_samples, k_features}","title":"Methods"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#exhaustive-feature-selector","text":"Implementation of an exhaustive feature selector for sampling and evaluating all possible feature combinations in a specified range. from mlxtend.feature_selection import ExhaustiveFeatureSelector","title":"Exhaustive Feature Selector"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#overview","text":"This exhaustive feature selection algorithm is a wrapper approach for brute-force evaluation of feature subsets; the best subset is selected by optimizing a specified performance metric given an arbitrary regressor or classifier. For instance, if the classifier is a logistic regression and the dataset consists of 4 features, the algorithm will evaluate all 15 feature combinations (if min_features=1 and max_features=4 ) {0} {1} {2} {3} {0, 1} {0, 2} {0, 3} {1, 2} {1, 3} {2, 3} {0, 1, 2} {0, 1, 3} {0, 2, 3} {1, 2, 3} {0, 1, 2, 3} and select the one that results in the best performance (e.g., classification accuracy) of the logistic regression classifier.","title":"Overview"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#example-1-a-simple-iris-example","text":"Initializing a simple classifier from scikit-learn: from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=3) efs1 = EFS(knn, min_features=1, max_features=4, scoring='accuracy', print_progress=True, cv=5) efs1 = efs1.fit(X, y) print('Best accuracy score: %.2f' % efs1.best_score_) print('Best subset (indices):', efs1.best_idx_) print('Best subset (corresponding names):', efs1.best_feature_names_) Features: 15/15 Best accuracy score: 0.97 Best subset (indices): (0, 2, 3) Best subset (corresponding names): ('0', '2', '3') Note that in the example above, the 'best_feature_names_' are simply a string equivalent of the feature indices. However, we can provide custom feature names to the fit function for this mapping: feature_names = ('sepal length', 'sepal width', 'petal length', 'petal width') efs1 = efs1.fit(X, y, custom_feature_names=feature_names) print('Best subset (corresponding names):', efs1.best_feature_names_) Features: 15/15 Best subset (corresponding names): ('sepal length', 'petal length', 'petal width') Via the subsets_ attribute, we can take a look at the selected feature indices at each step: efs1.subsets_ {0: {'avg_score': 0.65999999999999992, 'cv_scores': array([ 0.53333333, 0.63333333, 0.73333333, 0.76666667, 0.63333333]), 'feature_idx': (0,), 'feature_names': ('sepal length',)}, 1: {'avg_score': 0.56666666666666665, 'cv_scores': array([ 0.53333333, 0.63333333, 0.6 , 0.5 , 0.56666667]), 'feature_idx': (1,), 'feature_names': ('sepal width',)}, 2: {'avg_score': 0.95333333333333337, 'cv_scores': array([ 0.93333333, 1. , 0.9 , 0.93333333, 1.
]), 'feature_idx': (2,), 'feature_names': ('petal length',)}, 3: {'avg_score': 0.94666666666666666, 'cv_scores': array([ 0.96666667, 0.96666667, 0.93333333, 0.86666667, 1. ]), 'feature_idx': (3,), 'feature_names': ('petal width',)}, 4: {'avg_score': 0.72666666666666668, 'cv_scores': array([ 0.66666667, 0.8 , 0.63333333, 0.86666667, 0.66666667]), 'feature_idx': (0, 1), 'feature_names': ('sepal length', 'sepal width')}, 5: {'avg_score': 0.94666666666666666, 'cv_scores': array([ 0.96666667, 1. , 0.86666667, 0.93333333, 0.96666667]), 'feature_idx': (0, 2), 'feature_names': ('sepal length', 'petal length')}, 6: {'avg_score': 0.95333333333333337, 'cv_scores': array([ 0.96666667, 0.96666667, 0.9 , 0.93333333, 1. ]), 'feature_idx': (0, 3), 'feature_names': ('sepal length', 'petal width')}, 7: {'avg_score': 0.94666666666666666, 'cv_scores': array([ 0.96666667, 1. , 0.9 , 0.93333333, 0.93333333]), 'feature_idx': (1, 2), 'feature_names': ('sepal width', 'petal length')}, 8: {'avg_score': 0.94000000000000006, 'cv_scores': array([ 0.96666667, 0.96666667, 0.86666667, 0.93333333, 0.96666667]), 'feature_idx': (1, 3), 'feature_names': ('sepal width', 'petal width')}, 9: {'avg_score': 0.95333333333333337, 'cv_scores': array([ 0.96666667, 0.96666667, 0.9 , 0.93333333, 1. ]), 'feature_idx': (2, 3), 'feature_names': ('petal length', 'petal width')}, 10: {'avg_score': 0.94000000000000006, 'cv_scores': array([ 0.96666667, 0.96666667, 0.86666667, 0.93333333, 0.96666667]), 'feature_idx': (0, 1, 2), 'feature_names': ('sepal length', 'sepal width', 'petal length')}, 11: {'avg_score': 0.94666666666666666, 'cv_scores': array([ 0.93333333, 0.96666667, 0.9 , 0.93333333, 1. ]), 'feature_idx': (0, 1, 3), 'feature_names': ('sepal length', 'sepal width', 'petal width')}, 12: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.96666667, 0.96666667, 0.96666667, 0.96666667, 1. ]), 'feature_idx': (0, 2, 3), 'feature_names': ('sepal length', 'petal length', 'petal width')}, 13: {'avg_score': 0.95999999999999996, 'cv_scores': array([ 0.96666667, 0.96666667, 0.93333333, 0.93333333, 1. ]), 'feature_idx': (1, 2, 3), 'feature_names': ('sepal width', 'petal length', 'petal width')}, 14: {'avg_score': 0.96666666666666679, 'cv_scores': array([ 0.96666667, 0.96666667, 0.93333333, 0.96666667, 1. ]), 'feature_idx': (0, 1, 2, 3), 'feature_names': ('sepal length', 'sepal width', 'petal length', 'petal width')}}","title":"Example 1 - A simple Iris Example"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#example-2-visualizing-the-feature-selection-results","text":"For our convenience, we can visualize the output from the feature selection in a pandas DataFrame format using the get_metric_dict method of the ExhaustiveFeatureSelector object. The columns std_dev and std_err represent the standard deviation and standard errors of the cross-validation scores, respectively. 
Below, we see the DataFrame for the Exhaustive Feature Selector from Example 1: import pandas as pd iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=3) efs1 = EFS(knn, min_features=1, max_features=4, scoring='accuracy', print_progress=True, cv=5) feature_names = ('sepal length', 'sepal width', 'petal length', 'petal width') efs1 = efs1.fit(X, y, custom_feature_names=feature_names) df = pd.DataFrame.from_dict(efs1.get_metric_dict()).T df.sort_values('avg_score', inplace=True, ascending=False) df Features: 15/15 avg_score ci_bound cv_scores feature_idx feature_names std_dev std_err 12 0.973333 0.0171372 [0.966666666667, 0.966666666667, 0.96666666666... (0, 2, 3) (sepal length, petal length, petal width) 0.0133333 0.00666667 14 0.966667 0.0270963 [0.966666666667, 0.966666666667, 0.93333333333... (0, 1, 2, 3) (sepal length, sepal width, petal length, peta... 0.0210819 0.0105409 13 0.96 0.0320608 [0.966666666667, 0.966666666667, 0.93333333333... (1, 2, 3) (sepal width, petal length, petal width) 0.0249444 0.0124722 2 0.953333 0.0514116 [0.933333333333, 1.0, 0.9, 0.933333333333, 1.0] (2,) (petal length,) 0.04 0.02 6 0.953333 0.0436915 [0.966666666667, 0.966666666667, 0.9, 0.933333... (0, 3) (sepal length, petal width) 0.0339935 0.0169967 9 0.953333 0.0436915 [0.966666666667, 0.966666666667, 0.9, 0.933333... (2, 3) (petal length, petal width) 0.0339935 0.0169967 3 0.946667 0.0581151 [0.966666666667, 0.966666666667, 0.93333333333... (3,) (petal width,) 0.0452155 0.0226078 5 0.946667 0.0581151 [0.966666666667, 1.0, 0.866666666667, 0.933333... (0, 2) (sepal length, petal length) 0.0452155 0.0226078 7 0.946667 0.0436915 [0.966666666667, 1.0, 0.9, 0.933333333333, 0.9... (1, 2) (sepal width, petal length) 0.0339935 0.0169967 11 0.946667 0.0436915 [0.933333333333, 0.966666666667, 0.9, 0.933333... (0, 1, 3) (sepal length, sepal width, petal width) 0.0339935 0.0169967 8 0.94 0.0499631 [0.966666666667, 0.966666666667, 0.86666666666... (1, 3) (sepal width, petal width) 0.038873 0.0194365 10 0.94 0.0499631 [0.966666666667, 0.966666666667, 0.86666666666... (0, 1, 2) (sepal length, sepal width, petal length) 0.038873 0.0194365 4 0.726667 0.11623 [0.666666666667, 0.8, 0.633333333333, 0.866666... (0, 1) (sepal length, sepal width) 0.0904311 0.0452155 0 0.66 0.106334 [0.533333333333, 0.633333333333, 0.73333333333... (0,) (sepal length,) 0.0827312 0.0413656 1 0.566667 0.0605892 [0.533333333333, 0.633333333333, 0.6, 0.5, 0.5...
(1,) (sepal width,) 0.0471405 0.0235702 import matplotlib.pyplot as plt metric_dict = efs1.get_metric_dict() fig = plt.figure() k_feat = sorted(metric_dict.keys()) avg = [metric_dict[k]['avg_score'] for k in k_feat] upper, lower = [], [] for k in k_feat: upper.append(metric_dict[k]['avg_score'] + metric_dict[k]['std_dev']) lower.append(metric_dict[k]['avg_score'] - metric_dict[k]['std_dev']) plt.fill_between(k_feat, upper, lower, alpha=0.2, color='blue', lw=1) plt.plot(k_feat, avg, color='blue', marker='o') plt.ylabel('Accuracy +/- Standard Deviation') plt.xlabel('Number of Features') feature_min = len(metric_dict[k_feat[0]]['feature_idx']) feature_max = len(metric_dict[k_feat[-1]]['feature_idx']) plt.xticks(k_feat, [str(metric_dict[k]['feature_names']) for k in k_feat], rotation=90) plt.show()","title":"Example 2 - Visualizing the feature selection results"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#example-3-exhaustive-feature-selection-for-regression","text":"Similar to the classification examples above, the ExhaustiveFeatureSelector also supports scikit-learn's estimators for regression. from sklearn.linear_model import LinearRegression from sklearn.datasets import load_boston boston = load_boston() X, y = boston.data, boston.target lr = LinearRegression() efs = EFS(lr, min_features=10, max_features=12, scoring='neg_mean_squared_error', cv=10) efs.fit(X, y) print('Best MSE score: %.2f' % (efs.best_score_ * (-1))) print('Best subset:', efs.best_idx_) Features: 377/377 Best subset: (0, 1, 4, 6, 7, 8, 9, 10, 11, 12)","title":"Example 3 - Exhaustive Feature Selection for Regression"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#example-4-using-the-selected-feature-subset-for-making-new-predictions","text":"# Initialize the dataset from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=1) knn = KNeighborsClassifier(n_neighbors=3) # Select the \"best\" feature subset via # 5-fold cross-validation on the training set.
from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS efs1 = EFS(knn, min_features=1, max_features=4, scoring='accuracy', cv=5) efs1 = efs1.fit(X_train, y_train) Features: 15/15 print('Selected features:', efs1.best_idx_) Selected features: (2, 3) # Generate the new subsets based on the selected features # Note that the transform call is equivalent to # X_train[:, efs1.best_idx_] X_train_efs = efs1.transform(X_train) X_test_efs = efs1.transform(X_test) # Fit the estimator using the new feature subset # and make a prediction on the test data knn.fit(X_train_efs, y_train) y_pred = knn.predict(X_test_efs) # Compute the accuracy of the prediction acc = float((y_test == y_pred).sum()) / y_pred.shape[0] print('Test set accuracy: %.2f %%' % (acc*100)) Test set accuracy: 96.00 %","title":"Example 4 - Using the Selected Feature Subset For Making New Predictions"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#example-5-exhaustive-feature-selection-and-gridsearch","text":"# Initialize the dataset from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=1) Use scikit-learn's GridSearch to tune the hyperparameters of the LogisticRegression estimator inside the ExhaustiveFeatureSelector and use it for prediction in the pipeline. Note that the clone_estimator attribute needs to be set to False . from sklearn.model_selection import GridSearchCV from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS lr = LogisticRegression(multi_class='multinomial', solver='lbfgs', random_state=123) efs1 = EFS(estimator=lr, min_features=2, max_features=3, scoring='accuracy', print_progress=False, clone_estimator=False, cv=5, n_jobs=1) pipe = make_pipeline(efs1, lr) param_grid = {'exhaustivefeatureselector__estimator__C': [0.1, 1.0, 10.0]} gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', n_jobs=1, cv=2, verbose=1, refit=False) # run grid search gs = gs.fit(X_train, y_train) Fitting 2 folds for each of 3 candidates, totalling 6 fits [Parallel(n_jobs=1)]: Done 6 out of 6 | elapsed: 2.7s finished ... and the \"best\" parameters determined by GridSearch are ... print(\"Best parameters via GridSearch\", gs.best_params_) Best parameters via GridSearch {'exhaustivefeatureselector__estimator__C': 1.0}","title":"Example 5 - Exhaustive Feature Selection and GridSearch"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#obtaining-the-best-k-feature-indices-after-gridsearch","text":"If we are interested in the best k feature indices via ExhaustiveFeatureSelector.best_idx_ , we have to initialize a GridSearchCV object with refit=True . Now, the grid search object will take the complete training dataset and the best parameters, which it found via cross-validation, to train the estimator pipeline. gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', n_jobs=1, cv=2, verbose=1, refit=True) After running the grid search, we can access the individual pipeline objects of the best_estimator_ via the steps attribute.
gs = gs.fit(X_train, y_train) gs.best_estimator_.steps Fitting 2 folds for each of 3 candidates, totalling 6 fits [Parallel(n_jobs=1)]: Done 6 out of 6 | elapsed: 2.9s finished [('exhaustivefeatureselector', ExhaustiveFeatureSelector(clone_estimator=False, cv=5, estimator=LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='multinomial', n_jobs=1, penalty='l2', random_state=123, solver='lbfgs', tol=0.0001, verbose=0, warm_start=False), max_features=3, min_features=2, n_jobs=1, pre_dispatch='2*n_jobs', print_progress=False, scoring='accuracy')), ('logisticregression', LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='multinomial', n_jobs=1, penalty='l2', random_state=123, solver='lbfgs', tol=0.0001, verbose=0, warm_start=False))] Via sub-indexing, we can then obtain the best-selected feature subset: print('Best features:', gs.best_estimator_.steps[0][1].best_idx_) Best features: (2, 3) During cross-validation, this feature combination had a CV accuracy of: print('Best score:', gs.best_score_) Best score: 0.97 gs.best_params_ {'exhaustivefeatureselector__estimator__C': 1.0} Alternatively, we can set the \"best grid search parameters\" in our pipeline manually if we ran GridSearchCV with refit=False . It should yield the same results: pipe.set_params(**gs.best_params_).fit(X_train, y_train) print('Best features:', pipe.steps[0][1].best_idx_) Best features: (2, 3)","title":"Obtaining the best k feature indices after GridSearch"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#example-6-working-with-pandas-dataframes","text":"Optionally, we can also use pandas DataFrames and pandas Series as input to the fit function. In this case, the column names of the pandas DataFrame will be used as feature names. However, note that if custom_feature_names are provided in the fit function, these custom_feature_names take precedence over the DataFrame column-based feature names. import pandas as pd from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris iris = load_iris() col_names = ('sepal length', 'sepal width', 'petal length', 'petal width') X_df = pd.DataFrame(iris.data, columns=col_names) y_series = pd.Series(iris.target) from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS knn = KNeighborsClassifier(n_neighbors=3) efs1 = EFS(knn, min_features=1, max_features=4, scoring='accuracy', print_progress=True, cv=5) efs1 = efs1.fit(X_df, y_series) print('Best accuracy score: %.2f' % efs1.best_score_) print('Best subset (indices):', efs1.best_idx_) print('Best subset (corresponding names):', efs1.best_feature_names_) Features: 15/15 Best accuracy score: 0.97 Best subset (indices): (0, 2, 3) Best subset (corresponding names): ('sepal length', 'petal length', 'petal width')","title":"Example 6 - Working with pandas DataFrames"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#api","text":"ExhaustiveFeatureSelector(estimator, min_features=1, max_features=1, print_progress=True, scoring='accuracy', cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Exhaustive Feature Selection for Classification and Regression.
(new in v0.4.3) Parameters estimator : scikit-learn classifier or regressor min_features : int (default: 1) Minimum number of features to select max_features : int (default: 1) Maximum number of features to select print_progress : bool (default: True) Prints progress as the number of evaluated feature combinations to stderr. scoring : str, (default='accuracy') Scoring metric in {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} for regressors, or a callable object or function with signature scorer(estimator, X, y) . cv : int (default: 5) Scikit-learn cross-validation generator or int . If estimator is a classifier (or y consists of integer class labels), stratified k-fold is performed, and regular k-fold cross-validation otherwise. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1. Attributes best_idx_ : array-like, shape = [n_predictions] Feature Indices of the selected feature subsets. best_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. New in v 0.13.0. best_score_ : float Cross-validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the exhaustive selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. The 'feature_names' is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ExhaustiveFeatureSelector/","title":"API"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#methods","text":"fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data.
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data and return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns Feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': average of the CV scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Feature subset of X, shape={n_samples, k_features}","title":"Methods"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/","text":"Sequential Feature Selector Implementation of sequential feature algorithms (SFAs) -- greedy search algorithms -- that have been developed as a suboptimal solution to the computationally often infeasible exhaustive search. from mlxtend.feature_selection import SequentialFeatureSelector Overview Sequential feature selection algorithms are a family of greedy search algorithms that are used to reduce an initial d -dimensional feature space to a k -dimensional feature subspace where k < d . The motivation behind feature selection algorithms is to automatically select a subset of features that is most relevant to the problem.
The goal of feature selection is two-fold: We want to improve the computational efficiency and reduce the generalization error of the model by removing irrelevant features or noise. A wrapper approach such as sequential feature selection is especially useful if embedded feature selection -- for example, a regularization penalty like LASSO -- is not applicable. In a nutshell, SFAs remove or add one feature at a time based on the classifier performance until a feature subset of the desired size k is reached. There are 4 different flavors of SFAs available via the SequentialFeatureSelector : Sequential Forward Selection (SFS) Sequential Backward Selection (SBS) Sequential Forward Floating Selection (SFFS) Sequential Backward Floating Selection (SBFS) The floating variants, SFFS and SBFS, can be considered as extensions to the simpler SFS and SBS algorithms. The floating algorithms have an additional exclusion or inclusion step to remove features once they were included (or excluded), so that a larger number of feature subset combinations can be sampled. It is important to emphasize that this step is conditional and only occurs if the resulting feature subset is assessed as \"better\" by the criterion function after removal (or addition) of a particular feature. Furthermore, I added an optional check to skip the conditional exclusion steps if the algorithm gets stuck in cycles. How is this different from Recursive Feature Elimination (RFE) -- e.g., as implemented in sklearn.feature_selection.RFE ? RFE is computationally less complex, using the feature weight coefficients (e.g., linear models) or feature importance (tree-based algorithms) to eliminate features recursively, whereas SFAs eliminate (or add) features based on a user-defined classifier/regression performance metric. The SFAs are outlined in pseudo code below: Sequential Forward Selection (SFS) Input: Y = \\{y_1, y_2, ..., y_d\\} The SFS algorithm takes the whole d -dimensional feature set as input. Output: X_k = \\{x_j \\; | \\;j = 1, 2, ..., k; \\; x_j \\in Y\\} , where k = (0, 1, 2, ..., d) SFS returns a subset of features; the number of selected features k , where k < d , has to be specified a priori . Initialization: X_0 = \\emptyset , k = 0 We initialize the algorithm with an empty set \\emptyset (\"null set\") so that k = 0 (where k is the size of the subset). Step 1 (Inclusion): x^+ = \\text{ arg max } J(X_k + x), \\text{ where } x \\in Y - X_k X_{k+1} = X_k + x^+ k = k + 1 Go to Step 1 In this step, we add an additional feature, x^+ , to our feature subset X_k . x^+ is the feature that maximizes our criterion function, that is, the feature that is associated with the best classifier performance if it is added to X_k . We repeat this procedure until the termination criterion is satisfied. Termination: k = p We add features to the feature subset X_k until the feature subset of size k contains the number of desired features p that we specified a priori . Sequential Backward Selection (SBS) Input: the set of all features, Y = \\{y_1, y_2, ..., y_d\\} The SBS algorithm takes the whole feature set as input. Output: X_k = \\{x_j \\; | \\;j = 1, 2, ..., k; \\; x_j \\in Y\\} , where k = (0, 1, 2, ..., d) SBS returns a subset of features; the number of selected features k , where k < d , has to be specified a priori . Initialization: X_0 = Y , k = d We initialize the algorithm with the given feature set so that k = d .
Step 1 (Exclusion): x^- = \\text{ arg max } J(X_k - x), \\text{ where } x \\in X_k X_{k-1} = X_k - x^- k = k - 1 Go to Step 1 In this step, we remove a feature, x^- , from our feature subset X_k . x^- is the feature that maximizes our criterion function upon removal, that is, the feature that is associated with the best classifier performance if it is removed from X_k . We repeat this procedure until the termination criterion is satisfied. Termination: k = p We remove features from the feature subset X_k until the feature subset of size k contains the number of desired features p that we specified a priori . Sequential Backward Floating Selection (SBFS) Input: the set of all features, Y = \\{y_1, y_2, ..., y_d\\} The SBFS algorithm takes the whole feature set as input. Output: X_k = \\{x_j \\; | \\;j = 1, 2, ..., k; \\; x_j \\in Y\\} , where k = (0, 1, 2, ..., d) SBFS returns a subset of features; the number of selected features k , where k < d , has to be specified a priori . Initialization: X_0 = Y , k = d We initialize the algorithm with the given feature set so that k = d . Step 1 (Exclusion): x^- = \\text{ arg max } J(X_k - x), \\text{ where } x \\in X_k X_{k-1} = X_k - x^- k = k - 1 Go to Step 2 In this step, we remove a feature, x^- , from our feature subset X_k . x^- is the feature that maximizes our criterion function upon removal, that is, the feature that is associated with the best classifier performance if it is removed from X_k . Step 2 (Conditional Inclusion): x^+ = \\text{ arg max } J(X_k + x), \\text{ where } x \\in Y - X_k if J(X_k + x^+) > J(X_k) : X_{k+1} = X_k + x^+ k = k + 1 Go to Step 1 In Step 2, we search for features that improve the classifier performance if they are added back to the feature subset. If such features exist, we add the feature x^+ for which the performance improvement is maximized. If k = 2 or an improvement cannot be made (i.e., such feature x^+ cannot be found), go back to step 1; else, repeat this step. Termination: k = p We remove features from the feature subset X_k until the feature subset of size k contains the number of desired features p that we specified a priori . Sequential Forward Floating Selection (SFFS) Input: the set of all features, Y = \\{y_1, y_2, ..., y_d\\} The SFFS algorithm takes the whole feature set as input; for example, our feature space may consist of 10 dimensions ( d = 10 ). Output: a subset of features, X_k = \\{x_j \\; | \\;j = 1, 2, ..., k; \\; x_j \\in Y\\} , where k = (0, 1, 2, ..., d) The returned output of the algorithm is a subset of the feature space of a specified size. E.g., a subset of 5 features from a 10-dimensional feature space ( k = 5, d = 10 ). Initialization: X_0 = \\emptyset , k = 0 We initialize the algorithm with an empty set (\"null set\") so that k = 0 (where k is the size of the subset) Step 1 (Inclusion): x^+ = \\text{ arg max } J(X_k + x), \\text{ where } x \\in Y - X_k X_{k+1} = X_k + x^+ k = k + 1 Go to Step 2 Step 2 (Conditional Exclusion): x^- = \\text{ arg max } J(X_k - x), \\text{ where } x \\in X_k if \\; J(X_k - x^-) > J(X_k) : X_{k-1} = X_k - x^- k = k - 1 Go to Step 1 In step 1, we include the feature from the feature space that leads to the best performance increase for our feature subset (assessed by the criterion function ). Then, we go over to step 2. In step 2, we only remove a feature if the resulting subset would gain an increase in performance.
If k = 2 or an improvement cannot be made (i.e., such feature x^+ cannot be found), go back to step 1; else, repeat this step. Steps 1 and 2 are repeated until the Termination criterion is reached. Termination: stop when k equals the number of desired features References Ferri, F. J., Pudil P., Hatef, M., Kittler, J. (1994). \"Comparative study of techniques for large-scale feature selection.\" Pattern Recognition in Practice IV : 403-413. Pudil, P., Novovi\u010dov\u00e1, J., & Kittler, J. (1994). \"Floating search methods in feature selection.\" Pattern Recognition Letters 15.11 (1994): 1119-1125. Example 1 - A simple Sequential Forward Selection example Initializing a simple classifier from scikit-learn: from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=4) We start by selecting the \"best\" 3 features from the Iris dataset via Sequential Forward Selection (SFS). Here, we set forward=True and floating=False . By choosing cv=0 , we don't perform any cross-validation; therefore, the performance (here: 'accuracy' ) is computed entirely on the training set. from mlxtend.feature_selection import SequentialFeatureSelector as SFS sfs1 = SFS(knn, k_features=3, forward=True, floating=False, verbose=2, scoring='accuracy', cv=0) sfs1 = sfs1.fit(X, y) [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 1/3 -- score: 0.96[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 2/3 -- score: 0.973333333333[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 3/3 -- score: 0.973333333333 Via the subsets_ attribute, we can take a look at the selected feature indices at each step: sfs1.subsets_ {1: {'avg_score': 0.95999999999999996, 'cv_scores': array([ 0.96]), 'feature_idx': (3,), 'feature_names': ('3',)}, 2: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (2, 3), 'feature_names': ('2', '3')}, 3: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (1, 2, 3), 'feature_names': ('1', '2', '3')}} Note that the 'feature_names' entry is simply a string representation of the 'feature_idx' in this case.
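To connect the SFS pseudocode above with this output, here is a bare-bones sketch of the inclusion step (Step 1) in plain Python -- illustrative only, not mlxtend's actual implementation; with cv=0 it scores on the training set, just like the example above:

from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
knn = KNeighborsClassifier(n_neighbors=4)

selected, remaining = [], list(range(X.shape[1]))
while len(selected) < 3:  # Termination: k = p = 3
    # Step 1 (Inclusion): x+ = arg max J(X_k + x) over the remaining features
    scores = {j: knn.fit(X[:, selected + [j]], y).score(X[:, selected + [j]], y)
              for j in remaining}
    best = max(scores, key=scores.get)
    selected.append(best)
    remaining.remove(best)
    print('k=%d, added feature %d, J=%.4f' % (len(selected), best, scores[best]))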
Optionally, we can provide custom feature names via the fit method's custom_feature_names parameter: feature_names = ('sepal length', 'sepal width', 'petal length', 'petal width') sfs1 = sfs1.fit(X, y, custom_feature_names=feature_names) sfs1.subsets_ [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 1/3 -- score: 0.96[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 2/3 -- score: 0.973333333333[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 3/3 -- score: 0.973333333333 {1: {'avg_score': 0.95999999999999996, 'cv_scores': array([ 0.96]), 'feature_idx': (3,), 'feature_names': ('petal width',)}, 2: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (2, 3), 'feature_names': ('petal length', 'petal width')}, 3: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (1, 2, 3), 'feature_names': ('sepal width', 'petal length', 'petal width')}} Furthermore, we can access the indices of the 3 best features directly via the k_feature_idx_ attribute: sfs1.k_feature_idx_ (1, 2, 3) And similarly, to obtain the names of these features, given that we provided an argument to the custom_feature_names parameter, we can refer to the sfs1.k_feature_names_ attribute: sfs1.k_feature_names_ ('sepal width', 'petal length', 'petal width') Finally, the prediction score for these 3 features can be accessed via k_score_ : sfs1.k_score_ 0.97333333333333338 Example 2 - Toggling between SFS, SBS, SFFS, and SBFS Using the forward and floating parameters, we can toggle between SFS, SBS, SFFS, and SBFS as shown below. Note that we are performing (stratified) 4-fold cross-validation for more robust estimates in contrast to Example 1. Via n_jobs=-1 , we choose to run the cross-validation on all our available CPU cores.
# Sequential Forward Selection sfs = SFS(knn, k_features=3, forward=True, floating=False, scoring='accuracy', cv=4, n_jobs=-1) sfs = sfs.fit(X, y) print('\\nSequential Forward Selection (k=3):') print(sfs.k_feature_idx_) print('CV Score:') print(sfs.k_score_) ################################################### # Sequential Backward Selection sbs = SFS(knn, k_features=3, forward=False, floating=False, scoring='accuracy', cv=4, n_jobs=-1) sbs = sbs.fit(X, y) print('\\nSequential Backward Selection (k=3):') print(sbs.k_feature_idx_) print('CV Score:') print(sbs.k_score_) ################################################### # Sequential Forward Floating Selection sffs = SFS(knn, k_features=3, forward=True, floating=True, scoring='accuracy', cv=4, n_jobs=-1) sffs = sffs.fit(X, y) print('\\nSequential Forward Floating Selection (k=3):') print(sffs.k_feature_idx_) print('CV Score:') print(sffs.k_score_) ################################################### # Sequential Backward Floating Selection sbfs = SFS(knn, k_features=3, forward=False, floating=True, scoring='accuracy', cv=4, n_jobs=-1) sbfs = sbfs.fit(X, y) print('\\nSequential Backward Floating Selection (k=3):') print(sbfs.k_feature_idx_) print('CV Score:') print(sbfs.k_score_) Sequential Forward Selection (k=3): (1, 2, 3) CV Score: 0.972756410256 Sequential Backward Selection (k=3): (1, 2, 3) CV Score: 0.972756410256 Sequential Forward Floating Selection (k=3): (1, 2, 3) CV Score: 0.972756410256 Sequential Backward Floating Selection (k=3): (1, 2, 3) CV Score: 0.972756410256 In this simple scenario, selecting the best 3 features out of the 4 available features in the Iris set, we end up with similar results regardless of which sequential selection algorithm we used. Example 3 - Visualizing the results in DataFrames For our convenience, we can visualize the output from the feature selection in a pandas DataFrame format using the get_metric_dict method of the SequentialFeatureSelector object. The columns std_dev and std_err represent the standard deviation and standard errors of the cross-validation scores, respectively. Below, we see the DataFrame of the Sequential Forward Selector from Example 2: import pandas as pd pd.DataFrame.from_dict(sfs.get_metric_dict()).T avg_score ci_bound cv_scores feature_idx feature_names std_dev std_err 1 0.952991 0.0660624 [0.974358974359, 0.948717948718, 0.88888888888... (3,) (3,) 0.0412122 0.0237939 2 0.959936 0.0494801 [0.974358974359, 0.948717948718, 0.91666666666... (2, 3) (2, 3) 0.0308676 0.0178214 3 0.972756 0.0315204 [0.974358974359, 1.0, 0.944444444444, 0.972222... (1, 2, 3) (1, 2, 3) 0.0196636 0.0113528 Now, let's compare it to the Sequential Backward Selector: pd.DataFrame.from_dict(sbs.get_metric_dict()).T avg_score ci_bound cv_scores feature_idx feature_names std_dev std_err 3 0.972756 0.0315204 [0.974358974359, 1.0, 0.944444444444, 0.972222... (1, 2, 3) (1, 2, 3) 0.0196636 0.0113528 4 0.952991 0.0372857 [0.974358974359, 0.948717948718, 0.91666666666... (0, 1, 2, 3) (0, 1, 2, 3) 0.0232602 0.0134293 We can see that both SFS and SBS found the same \"best\" 3 features; however, the intermediate steps were obviously different.
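Because get_metric_dict is keyed by subset size, the resulting DataFrame can also be sorted to pick out the best-scoring subset size directly (a small sketch, not part of the original example):

import pandas as pd
# Sort by average CV score; the best subset size ends up on top
df = pd.DataFrame.from_dict(sfs.get_metric_dict()).T
print(df.sort_values('avg_score', ascending=False).head(1))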
The ci_bound column in the DataFrames above represents the confidence interval around the computed cross-validation scores. By default, a confidence interval of 95% is used, but we can use different confidence bounds via the confidence_interval parameter. E.g., the confidence bounds for a 90% confidence interval can be obtained as follows: pd.DataFrame.from_dict(sbs.get_metric_dict(confidence_interval=0.90)).T avg_score ci_bound cv_scores feature_idx feature_names std_dev std_err 3 0.972756 0.0242024 [0.974358974359, 1.0, 0.944444444444, 0.972222... (1, 2, 3) (1, 2, 3) 0.0196636 0.0113528 4 0.952991 0.0286292 [0.974358974359, 0.948717948718, 0.91666666666... (0, 1, 2, 3) (0, 1, 2, 3) 0.0232602 0.0134293 Example 4 - Plotting the results After importing the little helper function plotting.plot_sequential_feature_selection , we can also visualize the results using matplotlib figures. from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs import matplotlib.pyplot as plt sfs = SFS(knn, k_features=4, forward=True, floating=False, scoring='accuracy', verbose=2, cv=5) sfs = sfs.fit(X, y) fig1 = plot_sfs(sfs.get_metric_dict(), kind='std_dev') plt.ylim([0.8, 1]) plt.title('Sequential Forward Selection (w. StdDev)') plt.grid() plt.show() [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished [2018-05-06 12:49:18] Features: 1/4 -- score: 0.96[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished [2018-05-06 12:49:18] Features: 2/4 -- score: 0.966666666667[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.0s finished [2018-05-06 12:49:18] Features: 3/4 -- score: 0.953333333333[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s finished [2018-05-06 12:49:18] Features: 4/4 -- score: 0.973333333333 Example 5 - Sequential Feature Selection for Regression Similar to the classification examples above, the SequentialFeatureSelector also supports scikit-learn's estimators for regression. from sklearn.linear_model import LinearRegression from sklearn.datasets import load_boston boston = load_boston() X, y = boston.data, boston.target lr = LinearRegression() sfs = SFS(lr, k_features=13, forward=True, floating=False, scoring='neg_mean_squared_error', cv=10) sfs = sfs.fit(X, y) fig = plot_sfs(sfs.get_metric_dict(), kind='std_err') plt.title('Sequential Forward Selection (w. StdErr)') plt.grid() plt.show() Example 6 -- Feature Selection with Fixed Train/Validation Splits If you do not wish to use cross-validation (here: k-fold cross-validation, i.e., rotating training and validation folds), you can use the PredefinedHoldoutSplit class to specify your own, fixed training and validation split.
from sklearn.datasets import load_iris from mlxtend.evaluate import PredefinedHoldoutSplit import numpy as np iris = load_iris() X = iris.data y = iris.target rng = np.random.RandomState(123) my_validation_indices = rng.permutation(np.arange(150))[:30] print(my_validation_indices) [ 72 112 132 88 37 138 87 42 8 90 141 33 59 116 135 104 36 13 63 45 28 133 24 127 46 20 31 121 117 4] from sklearn.neighbors import KNeighborsClassifier from mlxtend.feature_selection import SequentialFeatureSelector as SFS knn = KNeighborsClassifier(n_neighbors=4) piter = PredefinedHoldoutSplit(my_validation_indices) sfs1 = SFS(knn, k_features=3, forward=True, floating=False, verbose=2, scoring='accuracy', cv=piter) sfs1 = sfs1.fit(X, y) [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished [2018-09-24 02:31:21] Features: 1/3 -- score: 0.9666666666666667[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished [2018-09-24 02:31:21] Features: 2/3 -- score: 0.9666666666666667[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.0s finished [2018-09-24 02:31:21] Features: 3/3 -- score: 0.9666666666666667 Example 7 -- Using the Selected Feature Subset For Making New Predictions # Initialize the dataset from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=1) knn = KNeighborsClassifier(n_neighbors=4) # Select the \"best\" three features via # 5-fold cross-validation on the training set. 
from mlxtend.feature_selection import SequentialFeatureSelector as SFS sfs1 = SFS(knn, k_features=3, forward=True, floating=False, scoring='accuracy', cv=5) sfs1 = sfs1.fit(X_train, y_train) print('Selected features:', sfs1.k_feature_idx_) Selected features: (1, 2, 3) # Generate the new subsets based on the selected features # Note that the transform call is equivalent to # X_train[:, sfs1.k_feature_idx_] X_train_sfs = sfs1.transform(X_train) X_test_sfs = sfs1.transform(X_test) # Fit the estimator using the new feature subset # and make a prediction on the test data knn.fit(X_train_sfs, y_train) y_pred = knn.predict(X_test_sfs) # Compute the accuracy of the prediction acc = float((y_test == y_pred).sum()) / y_pred.shape[0] print('Test set accuracy: %.2f %%' % (acc * 100)) Test set accuracy: 96.00 % Example 8 -- Sequential Feature Selection and GridSearch # Initialize the dataset from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=1) Use scikit-learn's GridSearch to tune the hyperparameters inside and outside the SequentialFeatureSelector : from sklearn.model_selection import GridSearchCV from sklearn.pipeline import Pipeline from mlxtend.feature_selection import SequentialFeatureSelector as SFS import mlxtend knn = KNeighborsClassifier(n_neighbors=2) sfs1 = SFS(estimator=knn, k_features=3, forward=True, floating=False, scoring='accuracy', cv=5) pipe = Pipeline([('sfs', sfs1), ('knn', knn)]) param_grid = [ {'sfs__k_features': [1, 2, 3, 4], 'sfs__estimator__n_neighbors': [1, 2, 3, 4]} ] gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', n_jobs=1, cv=5, refit=False) # run gridsearch gs = gs.fit(X_train, y_train) ... and the \"best\" parameters determined by GridSearch are ... print(\"Best parameters via GridSearch\", gs.best_params_) Best parameters via GridSearch {'sfs__estimator__n_neighbors': 1, 'sfs__k_features': 3} Obtaining the best k feature indices after GridSearch If we are interested in the best k feature indices via SequentialFeatureSelection.k_feature_idx_ , we have to initialize a GridSearchCV object with refit=True . Now, the grid search object will take the complete training dataset and the best parameters, which it found via cross-validation, to train the estimator pipeline. gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', n_jobs=1, cv=5, refit=True) gs = gs.fit(X_train, y_train) After running the grid search, we can access the individual pipeline objects of the best_estimator_ via the steps attribute.
gs.best_estimator_.steps [('sfs', SequentialFeatureSelector(clone_estimator=True, cv=5, estimator=KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=1, n_neighbors=1, p=2, weights='uniform'), floating=False, forward=True, k_features=3, n_jobs=1, pre_dispatch='2*n_jobs', scoring='accuracy', verbose=0)), ('knn', KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=1, n_neighbors=2, p=2, weights='uniform'))] Via sub-indexing, we can then obtain the best-selected feature subset: print('Best features:', gs.best_estimator_.steps[0][1].k_feature_idx_) Best features: (0, 1, 3) During cross-validation, this feature combination had a CV accuracy of: print('Best score:', gs.best_score_) Best score: 0.94 gs.best_params_ {'sfs__estimator__n_neighbors': 1, 'sfs__k_features': 3} Alternatively, we can set the \"best grid search parameters\" in our pipeline manually if we ran GridSearchCV with refit=False . It should yield the same results: pipe.set_params(**gs.best_params_).fit(X_train, y_train) print('Best features:', pipe.steps[0][1].k_feature_idx_) Best features: (0, 1, 3) Example 9 -- Selecting the \"best\" feature combination in a k-range If k_features is set to a tuple (min_k, max_k) (new in 0.4.2), the SFS will now select the best feature combination that it discovered by iterating from k=1 to max_k (forward), or max_k to min_k (backward). The size of the returned feature subset is then between min_k and max_k , depending on which combination scored best during cross-validation. X.shape (150, 4) from mlxtend.feature_selection import SequentialFeatureSelector as SFS from sklearn.neighbors import KNeighborsClassifier from mlxtend.data import wine_data from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.pipeline import make_pipeline X, y = wine_data() X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.3, random_state=1) knn = KNeighborsClassifier(n_neighbors=2) sfs1 = SFS(estimator=knn, k_features=(3, 10), forward=True, floating=False, scoring='accuracy', cv=5) pipe = make_pipeline(StandardScaler(), sfs1) pipe.fit(X_train, y_train) print('best combination (ACC: %.3f): %s\\n' % (sfs1.k_score_, sfs1.k_feature_idx_)) print('all subsets:\\n', sfs1.subsets_) plot_sfs(sfs1.get_metric_dict(), kind='std_err'); best combination (ACC: 0.992): (0, 1, 2, 3, 6, 8, 9, 10, 11, 12) all subsets: {1: {'feature_idx': (6,), 'cv_scores': array([ 0.84615385, 0.6 , 0.88 , 0.79166667, 0.875 ]), 'avg_score': 0.7985641025641026, 'feature_names': ('6',)}, 2: {'feature_idx': (6, 9), 'cv_scores': array([ 0.92307692, 0.88 , 1. , 0.95833333, 0.91666667]), 'avg_score': 0.93561538461538463, 'feature_names': ('6', '9')}, 3: {'feature_idx': (6, 9, 12), 'cv_scores': array([ 0.92307692, 0.92 , 0.96 , 1. , 0.95833333]), 'avg_score': 0.95228205128205123, 'feature_names': ('6', '9', '12')}, 4: {'feature_idx': (3, 6, 9, 12), 'cv_scores': array([ 0.96153846, 0.96 , 0.96 , 1. , 0.95833333]), 'avg_score': 0.96797435897435891, 'feature_names': ('3', '6', '9', '12')}, 5: {'feature_idx': (3, 6, 9, 10, 12), 'cv_scores': array([ 0.92307692, 0.96 , 1. , 1. , 1. ]), 'avg_score': 0.97661538461538466, 'feature_names': ('3', '6', '9', '10', '12')}, 6: {'feature_idx': (2, 3, 6, 9, 10, 12), 'cv_scores': array([ 0.92307692, 0.96 , 1. , 0.95833333, 1.
]), 'avg_score': 0.96828205128205125, 'feature_names': ('2', '3', '6', '9', '10', '12')}, 7: {'feature_idx': (0, 2, 3, 6, 9, 10, 12), 'cv_scores': array([ 0.92307692, 0.92 , 1. , 1. , 1. ]), 'avg_score': 0.96861538461538466, 'feature_names': ('0', '2', '3', '6', '9', '10', '12')}, 8: {'feature_idx': (0, 2, 3, 6, 8, 9, 10, 12), 'cv_scores': array([ 1. , 0.92, 1. , 1. , 1. ]), 'avg_score': 0.98399999999999999, 'feature_names': ('0', '2', '3', '6', '8', '9', '10', '12')}, 9: {'feature_idx': (0, 2, 3, 6, 8, 9, 10, 11, 12), 'cv_scores': array([ 1. , 0.92, 1. , 1. , 1. ]), 'avg_score': 0.98399999999999999, 'feature_names': ('0', '2', '3', '6', '8', '9', '10', '11', '12')}, 10: {'feature_idx': (0, 1, 2, 3, 6, 8, 9, 10, 11, 12), 'cv_scores': array([ 1. , 0.96, 1. , 1. , 1. ]), 'avg_score': 0.99199999999999999, 'feature_names': ('0', '1', '2', '3', '6', '8', '9', '10', '11', '12')}} Example 10 -- Using other cross-validation schemes In addition to standard k-fold and stratified k-fold, other cross-validation schemes can be used with SequentialFeatureSelector . For example, GroupKFold or LeaveOneOut cross-validation from scikit-learn. Using GroupKFold with SequentialFeatureSelector from mlxtend.feature_selection import SequentialFeatureSelector as SFS from sklearn.neighbors import KNeighborsClassifier from mlxtend.data import iris_data from sklearn.model_selection import GroupKFold import numpy as np X, y = iris_data() groups = np.arange(len(y)) // 10 print('groups: {}'.format(groups)) groups: [ 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5 5 6 6 6 6 6 6 6 6 6 6 7 7 7 7 7 7 7 7 7 7 8 8 8 8 8 8 8 8 8 8 9 9 9 9 9 9 9 9 9 9 10 10 10 10 10 10 10 10 10 10 11 11 11 11 11 11 11 11 11 11 12 12 12 12 12 12 12 12 12 12 13 13 13 13 13 13 13 13 13 13 14 14 14 14 14 14 14 14 14 14] Calling the split() method of a scikit-learn cross-validator object will return a generator that yields train, test splits. cv_gen = GroupKFold(4).split(X, y, groups) cv_gen The cv parameter of SequentialFeatureSelector must be either an int or an iterable yielding train, test splits. This iterable can be constructed by passing the train, test split generator to the built-in list() function. cv = list(cv_gen) knn = KNeighborsClassifier(n_neighbors=2) sfs = SFS(estimator=knn, k_features=2, scoring='accuracy', cv=cv) sfs.fit(X, y) print('best combination (ACC: %.3f): %s\\n' % (sfs.k_score_, sfs.k_feature_idx_)) best combination (ACC: 0.940): (2, 3) Example 11 - Working with pandas DataFrames Optionally, we can also use pandas DataFrames and pandas Series as input to the fit function. In this case, the column names of the pandas DataFrame will be used as feature names. However, note that if custom_feature_names are provided in the fit function, these custom_feature_names take precedence over the DataFrame column-based feature names.
import pandas as pd from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from mlxtend.feature_selection import SequentialFeatureSelector as SFS iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=4) sfs1 = SFS(knn, k_features=3, forward=True, floating=False, scoring='accuracy', cv=0) X_df = pd.DataFrame(X, columns=['sepal len', 'petal len', 'sepal width', 'petal width']) X_df.head() sepal len petal len sepal width petal width 0 5.1 3.5 1.4 0.2 1 4.9 3.0 1.4 0.2 2 4.7 3.2 1.3 0.2 3 4.6 3.1 1.5 0.2 4 5.0 3.6 1.4 0.2 Also, the target array, y , can optionally be cast as a Series: y_series = pd.Series(y) y_series.head() 0 0 1 0 2 0 3 0 4 0 dtype: int64 sfs1 = sfs1.fit(X_df, y_series) Note that the only difference when passing a pandas DataFrame as input is that the sfs1.subsets_ dictionary will now contain the DataFrame column names as 'feature_names' : sfs1.subsets_ {1: {'avg_score': 0.95999999999999996, 'cv_scores': array([ 0.96]), 'feature_idx': (3,), 'feature_names': ('petal width',)}, 2: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (2, 3), 'feature_names': ('sepal width', 'petal width')}, 3: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (1, 2, 3), 'feature_names': ('petal len', 'sepal width', 'petal width')}} In mlxtend version >= 0.13, pandas DataFrames are supported as feature inputs to the SequentialFeatureSelector in addition to NumPy arrays and other NumPy-like array types. API SequentialFeatureSelector(estimator, k_features=1, forward=True, floating=False, verbose=0, scoring=None, cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Sequential Feature Selection for Classification and Regression. Parameters estimator : scikit-learn classifier or regressor k_features : int or tuple or str (default: 1) Number of features to select, where k_features < the full feature set. New in 0.4.2: A tuple containing a min and max value can be provided, and the SFS will return any feature combination between min and max that scored highest in cross-validation. For example, the tuple (1, 4) will return any combination from 1 up to 4 features instead of a fixed number of features k. New in 0.8.0: A string argument \"best\" or \"parsimonious\". If \"best\" is provided, the feature selector will return the feature subset with the best cross-validation performance. If \"parsimonious\" is provided as an argument, the smallest feature subset that is within one standard error of the cross-validation performance will be selected. forward : bool (default: True) Forward selection if True, backward selection otherwise floating : bool (default: False) Adds a conditional exclusion/inclusion if True. verbose : int (default: 0), level of verbosity to use in logging. If 0, no output; if 1, number of features in current set; if 2, detailed logging including timestamp and cv scores at step. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors.
If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. cv : int (default: 5) Integer or iterable yielding train, test splits. If cv is an integer and estimator is a classifier (or y consists of integer class labels), stratified k-fold is performed. Otherwise, regular k-fold cross-validation is performed. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1. Attributes k_feature_idx_ : array-like, shape = [n_predictions] Feature indices of the selected feature subsets. k_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. New in v 0.13.0. k_score_ : float Cross validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the sequential selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. The 'feature_names' is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/SequentialFeatureSelector/ Methods fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: a pandas Series is now also accepted as argument for y. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] .
(new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data, then reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: a pandas Series is now also accepted as argument for y. fit_params : dict of string -> object, optional Parameters to pass to the fit method of classifier. Returns Reduced feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': average of the CV scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Reduced feature subset of X, shape={n_samples, k_features}","title":"Sequential Feature Selector"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#sequential-feature-selector","text":"Implementation of sequential feature algorithms (SFAs) -- greedy search algorithms -- that have been developed as a suboptimal solution to the often computationally infeasible exhaustive search. from mlxtend.feature_selection import SequentialFeatureSelector","title":"Sequential Feature Selector"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#overview","text":"Sequential feature selection algorithms are a family of greedy search algorithms that are used to reduce an initial d -dimensional feature space to a k -dimensional feature subspace where k < d . The motivation behind feature selection algorithms is to automatically select a subset of features that is most relevant to the problem. The goal of feature selection is two-fold: We want to improve the computational efficiency and reduce the generalization error of the model by removing irrelevant features or noise.
A wrapper approach such as sequential feature selection is especially useful if embedded feature selection -- for example, a regularization penalty like LASSO -- is not applicable. In a nutshell, SFAs remove or add one feature at a time based on the classifier performance until a feature subset of the desired size k is reached. There are 4 different flavors of SFAs available via the SequentialFeatureSelector : Sequential Forward Selection (SFS) Sequential Backward Selection (SBS) Sequential Forward Floating Selection (SFFS) Sequential Backward Floating Selection (SBFS) The floating variants, SFFS and SBFS, can be considered as extensions to the simpler SFS and SBS algorithms. The floating algorithms have an additional exclusion or inclusion step to remove features once they were included (or excluded), so that a larger number of feature subset combinations can be sampled. It is important to emphasize that this step is conditional and only occurs if the resulting feature subset is assessed as \"better\" by the criterion function after removal (or addition) of a particular feature. Furthermore, I added an optional check to skip the conditional exclusion steps if the algorithm gets stuck in cycles. How is this different from Recursive Feature Elimination (RFE) -- e.g., as implemented in sklearn.feature_selection.RFE ? RFE is computationally less complex, using the feature weight coefficients (e.g., linear models) or feature importance (tree-based algorithms) to eliminate features recursively, whereas SFAs eliminate (or add) features based on a user-defined classifier/regression performance metric. The SFAs are outlined in pseudo code below:","title":"Overview"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#sequential-forward-selection-sfs","text":"Input: Y = \\{y_1, y_2, ..., y_d\\} The SFS algorithm takes the whole d -dimensional feature set as input. Output: X_k = \\{x_j \\; | \\;j = 1, 2, ..., k; \\; x_j \\in Y\\} , where k = (0, 1, 2, ..., d) SFS returns a subset of features; the number of selected features k , where k < d , has to be specified a priori . Initialization: X_0 = \\emptyset , k = 0 We initialize the algorithm with an empty set \\emptyset (\"null set\") so that k = 0 (where k is the size of the subset). Step 1 (Inclusion): x^+ = \\text{ arg max } J(X_k + x), \\text{ where } x \\in Y - X_k X_{k+1} = X_k + x^+ k = k + 1 Go to Step 1 In this step, we add an additional feature, x^+ , to our feature subset X_k . x^+ is the feature that maximizes our criterion function, that is, the feature that is associated with the best classifier performance if it is added to X_k . We repeat this procedure until the termination criterion is satisfied. Termination: k = p We add features to the feature subset X_k until the feature subset of size k contains the number of desired features p that we specified a priori .","title":"Sequential Forward Selection (SFS)"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#sequential-backward-selection-sbs","text":"Input: the set of all features, Y = \\{y_1, y_2, ..., y_d\\} The SBS algorithm takes the whole feature set as input. Output: X_k = \\{x_j \\; | \\;j = 1, 2, ..., k; \\; x_j \\in Y\\} , where k = (0, 1, 2, ..., d) SBS returns a subset of features; the number of selected features k , where k < d , has to be specified a priori . Initialization: X_0 = Y , k = d We initialize the algorithm with the given feature set so that k = d .
Step 1 (Exclusion): x^- = \\text{ arg max } J(X_k - x), \\text{ where } x \\in X_k X_{k-1} = X_k - x^- k = k - 1 Go to Step 1 In this step, we remove a feature, x^- , from our feature subset X_k . x^- is the feature that maximizes our criterion function upon removal, that is, the feature that is associated with the best classifier performance if it is removed from X_k . We repeat this procedure until the termination criterion is satisfied. Termination: k = p We remove features from the feature subset X_k until the feature subset of size k contains the number of desired features p that we specified a priori .","title":"Sequential Backward Selection (SBS)"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#sequential-backward-floating-selection-sbfs","text":"Input: the set of all features, Y = \\{y_1, y_2, ..., y_d\\} The SBFS algorithm takes the whole feature set as input. Output: X_k = \\{x_j \\; | \\;j = 1, 2, ..., k; \\; x_j \\in Y\\} , where k = (0, 1, 2, ..., d) SBFS returns a subset of features; the number of selected features k , where k < d , has to be specified a priori . Initialization: X_0 = Y , k = d We initialize the algorithm with the given feature set so that k = d . Step 1 (Exclusion): x^- = \\text{ arg max } J(X_k - x), \\text{ where } x \\in X_k X_{k-1} = X_k - x^- k = k - 1 Go to Step 2 In this step, we remove a feature, x^- , from our feature subset X_k . x^- is the feature that maximizes our criterion function upon removal, that is, the feature that is associated with the best classifier performance if it is removed from X_k . Step 2 (Conditional Inclusion): x^+ = \\text{ arg max } J(X_k + x), \\text{ where } x \\in Y - X_k if J(X_k + x^+) > J(X_k) : X_{k+1} = X_k + x^+ k = k + 1 Go to Step 1 In Step 2, we search for features that improve the classifier performance if they are added back to the feature subset. If such features exist, we add the feature x^+ for which the performance improvement is maximized. If k = 2 or an improvement cannot be made (i.e., such feature x^+ cannot be found), go back to step 1; else, repeat this step. Termination: k = p We remove features from the feature subset X_k until the feature subset of size k contains the number of desired features p that we specified a priori .","title":"Sequential Backward Floating Selection (SBFS)"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#sequential-forward-floating-selection-sffs","text":"Input: the set of all features, Y = \\{y_1, y_2, ..., y_d\\} The SFFS algorithm takes the whole feature set as input; for example, our feature space may consist of 10 dimensions ( d = 10 ). Output: a subset of features, X_k = \\{x_j \\; | \\;j = 1, 2, ..., k; \\; x_j \\in Y\\} , where k = (0, 1, 2, ..., d) The returned output of the algorithm is a subset of the feature space of a specified size. E.g., a subset of 5 features from a 10-dimensional feature space ( k = 5, d = 10 ).
Initialization: X_0 = \\emptyset , k = 0 We initialize the algorithm with an empty set (\"null set\") so that k = 0 (where k is the size of the subset) Step 1 (Inclusion): x^+ = \\text{ arg max } J(X_k + x), \\text{ where } x \\in Y - X_k X_{k+1} = X_k + x^+ k = k + 1 Go to Step 2 Step 2 (Conditional Exclusion): x^- = \\text{ arg max } J(X_k - x), \\text{ where } x \\in X_k if \\; J(X_k - x^-) > J(X_k) : X_{k-1} = X_k - x^- k = k - 1 Go to Step 1 In step 1, we include the feature from the feature space that leads to the best performance increase for our feature subset (assessed by the criterion function ). Then, we go over to step 2. In step 2, we only remove a feature if the resulting subset would gain an increase in performance. If k = 2 or an improvement cannot be made (i.e., such feature x^+ cannot be found), go back to step 1; else, repeat this step. Steps 1 and 2 are repeated until the Termination criterion is reached. Termination: stop when k equals the number of desired features","title":"Sequential Forward Floating Selection (SFFS)"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#references","text":"Ferri, F. J., Pudil P., Hatef, M., Kittler, J. (1994). \"Comparative study of techniques for large-scale feature selection.\" Pattern Recognition in Practice IV : 403-413. Pudil, P., Novovi\u010dov\u00e1, J., & Kittler, J. (1994). \"Floating search methods in feature selection.\" Pattern Recognition Letters 15.11 (1994): 1119-1125.","title":"References"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-1-a-simple-sequential-forward-selection-example","text":"Initializing a simple classifier from scikit-learn: from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=4) We start by selecting the \"best\" 3 features from the Iris dataset via Sequential Forward Selection (SFS). Here, we set forward=True and floating=False . By choosing cv=0 , we don't perform any cross-validation; therefore, the performance (here: 'accuracy' ) is computed entirely on the training set. from mlxtend.feature_selection import SequentialFeatureSelector as SFS sfs1 = SFS(knn, k_features=3, forward=True, floating=False, verbose=2, scoring='accuracy', cv=0) sfs1 = sfs1.fit(X, y) [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 1/3 -- score: 0.96[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 2/3 -- score: 0.973333333333[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 3/3 -- score: 0.973333333333 Via the subsets_ attribute, we can take a look at the selected feature indices at each step: sfs1.subsets_ {1: {'avg_score': 0.95999999999999996, 'cv_scores': array([ 0.96]), 'feature_idx': (3,), 'feature_names': ('3',)}, 2: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (2, 3), 'feature_names': ('2', '3')}, 3: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (1, 2, 3), 'feature_names': ('1', '2', '3')}} Note that the 'feature_names' entry is simply a string representation of the 'feature_idx' in this case.
Optionally, we can provide custom feature names via the fit method's custom_feature_names parameter: feature_names = ('sepal length', 'sepal width', 'petal length', 'petal width') sfs1 = sfs1.fit(X, y, custom_feature_names=feature_names) sfs1.subsets_ [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 1/3 -- score: 0.96[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 2/3 -- score: 0.973333333333[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 3/3 -- score: 0.973333333333 {1: {'avg_score': 0.95999999999999996, 'cv_scores': array([ 0.96]), 'feature_idx': (3,), 'feature_names': ('petal width',)}, 2: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (2, 3), 'feature_names': ('petal length', 'petal width')}, 3: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (1, 2, 3), 'feature_names': ('sepal width', 'petal length', 'petal width')}} Furthermore, we can access the indices of the 3 best features directly via the k_feature_idx_ attribute: sfs1.k_feature_idx_ (1, 2, 3) And similarly, to obtain the names of these features, given that we provided an argument to the custom_feature_names parameter, we can refer to the sfs1.k_feature_names_ attribute: sfs1.k_feature_names_ ('sepal width', 'petal length', 'petal width') Finally, the prediction score for these 3 features can be accessed via k_score_ : sfs1.k_score_ 0.97333333333333338","title":"Example 1 - A simple Sequential Forward Selection example"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-2-toggling-between-sfs-sbs-sffs-and-sbfs","text":"Using the forward and floating parameters, we can toggle between SFS, SBS, SFFS, and SBFS as shown below. Note that we are performing (stratified) 4-fold cross-validation for more robust estimates in contrast to Example 1. Via n_jobs=-1 , we choose to run the cross-validation on all our available CPU cores.
# Sequential Forward Selection sfs = SFS(knn, k_features=3, forward=True, floating=False, scoring='accuracy', cv=4, n_jobs=-1) sfs = sfs.fit(X, y) print('\\nSequential Forward Selection (k=3):') print(sfs.k_feature_idx_) print('CV Score:') print(sfs.k_score_) ################################################### # Sequential Backward Selection sbs = SFS(knn, k_features=3, forward=False, floating=False, scoring='accuracy', cv=4, n_jobs=-1) sbs = sbs.fit(X, y) print('\\nSequential Backward Selection (k=3):') print(sbs.k_feature_idx_) print('CV Score:') print(sbs.k_score_) ################################################### # Sequential Forward Floating Selection sffs = SFS(knn, k_features=3, forward=True, floating=True, scoring='accuracy', cv=4, n_jobs=-1) sffs = sffs.fit(X, y) print('\\nSequential Forward Floating Selection (k=3):') print(sffs.k_feature_idx_) print('CV Score:') print(sffs.k_score_) ################################################### # Sequential Backward Floating Selection sbfs = SFS(knn, k_features=3, forward=False, floating=True, scoring='accuracy', cv=4, n_jobs=-1) sbfs = sbfs.fit(X, y) print('\\nSequential Backward Floating Selection (k=3):') print(sbfs.k_feature_idx_) print('CV Score:') print(sbfs.k_score_) Sequential Forward Selection (k=3): (1, 2, 3) CV Score: 0.972756410256 Sequential Backward Selection (k=3): (1, 2, 3) CV Score: 0.972756410256 Sequential Forward Floating Selection (k=3): (1, 2, 3) CV Score: 0.972756410256 Sequential Backward Floating Selection (k=3): (1, 2, 3) CV Score: 0.972756410256 In this simple scenario, selecting the best 3 features out of the 4 available features in the Iris set, we end up with similar results regardless of which sequential selection algorithm we used.","title":"Example 2 - Toggling between SFS, SBS, SFFS, and SBFS"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-3-visualizing-the-results-in-dataframes","text":"For our convenience, we can visualize the output from the feature selection in a pandas DataFrame format using the get_metric_dict method of the SequentialFeatureSelector object. The columns std_dev and std_err represent the standard deviation and standard errors of the cross-validation scores, respectively. Below, we see the DataFrame of the Sequential Forward Selector from Example 2: import pandas as pd pd.DataFrame.from_dict(sfs.get_metric_dict()).T avg_score ci_bound cv_scores feature_idx feature_names std_dev std_err 1 0.952991 0.0660624 [0.974358974359, 0.948717948718, 0.88888888888... (3,) (3,) 0.0412122 0.0237939 2 0.959936 0.0494801 [0.974358974359, 0.948717948718, 0.91666666666... (2, 3) (2, 3) 0.0308676 0.0178214 3 0.972756 0.0315204 [0.974358974359, 1.0, 0.944444444444, 0.972222... (1, 2, 3) (1, 2, 3) 0.0196636 0.0113528 Now, let's compare it to the Sequential Backward Selector: pd.DataFrame.from_dict(sbs.get_metric_dict()).T avg_score ci_bound cv_scores feature_idx feature_names std_dev std_err 3 0.972756 0.0315204 [0.974358974359, 1.0, 0.944444444444, 0.972222... (1, 2, 3) (1, 2, 3) 0.0196636 0.0113528 4 0.952991 0.0372857 [0.974358974359, 0.948717948718, 0.91666666666...
(0, 1, 2, 3) (0, 1, 2, 3) 0.0232602 0.0134293 We can see that both SFS and SBS found the same \"best\" 3 features; however, the intermediate steps were obviously different. The ci_bound column in the DataFrames above represents the confidence interval around the computed cross-validation scores. By default, a confidence interval of 95% is used, but we can use different confidence bounds via the confidence_interval parameter. E.g., the confidence bounds for a 90% confidence interval can be obtained as follows: pd.DataFrame.from_dict(sbs.get_metric_dict(confidence_interval=0.90)).T avg_score ci_bound cv_scores feature_idx feature_names std_dev std_err 3 0.972756 0.0242024 [0.974358974359, 1.0, 0.944444444444, 0.972222... (1, 2, 3) (1, 2, 3) 0.0196636 0.0113528 4 0.952991 0.0286292 [0.974358974359, 0.948717948718, 0.91666666666... (0, 1, 2, 3) (0, 1, 2, 3) 0.0232602 0.0134293","title":"Example 3 - Visualizing the results in DataFrames"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-4-plotting-the-results","text":"After importing the little helper function plotting.plot_sequential_feature_selection , we can also visualize the results using matplotlib figures. from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs import matplotlib.pyplot as plt sfs = SFS(knn, k_features=4, forward=True, floating=False, scoring='accuracy', verbose=2, cv=5) sfs = sfs.fit(X, y) fig1 = plot_sfs(sfs.get_metric_dict(), kind='std_dev') plt.ylim([0.8, 1]) plt.title('Sequential Forward Selection (w. StdDev)') plt.grid() plt.show() [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished [2018-05-06 12:49:18] Features: 1/4 -- score: 0.96[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished [2018-05-06 12:49:18] Features: 2/4 -- score: 0.966666666667[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.0s finished [2018-05-06 12:49:18] Features: 3/4 -- score: 0.953333333333[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s finished [2018-05-06 12:49:18] Features: 4/4 -- score: 0.973333333333","title":"Example 4 - Plotting the results"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-5-sequential-feature-selection-for-regression","text":"Similar to the classification examples above, the SequentialFeatureSelector also supports scikit-learn's estimators for regression. from sklearn.linear_model import LinearRegression from sklearn.datasets import load_boston boston = load_boston() X, y = boston.data, boston.target lr = LinearRegression() sfs = SFS(lr, k_features=13, forward=True, floating=False, scoring='neg_mean_squared_error', cv=10) sfs = sfs.fit(X, y) fig = plot_sfs(sfs.get_metric_dict(), kind='std_err') plt.title('Sequential Forward Selection (w.
StdErr)') plt.grid() plt.show()","title":"Example 5 - Sequential Feature Selection for Regression"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-6-feature-selection-with-fixed-trainvalidation-splits","text":"If you do not wish to use cross-validation (here: k-fold cross-validation, i.e., rotating training and validation folds), you can use the PredefinedHoldoutSplit class to specify your own, fixed training and validation split. from sklearn.datasets import load_iris from mlxtend.evaluate import PredefinedHoldoutSplit import numpy as np iris = load_iris() X = iris.data y = iris.target rng = np.random.RandomState(123) my_validation_indices = rng.permutation(np.arange(150))[:30] print(my_validation_indices) [ 72 112 132 88 37 138 87 42 8 90 141 33 59 116 135 104 36 13 63 45 28 133 24 127 46 20 31 121 117 4] from sklearn.neighbors import KNeighborsClassifier from mlxtend.feature_selection import SequentialFeatureSelector as SFS knn = KNeighborsClassifier(n_neighbors=4) piter = PredefinedHoldoutSplit(my_validation_indices) sfs1 = SFS(knn, k_features=3, forward=True, floating=False, verbose=2, scoring='accuracy', cv=piter) sfs1 = sfs1.fit(X, y) [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished [2018-09-24 02:31:21] Features: 1/3 -- score: 0.9666666666666667[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished [2018-09-24 02:31:21] Features: 2/3 -- score: 0.9666666666666667[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.0s finished [2018-09-24 02:31:21] Features: 3/3 -- score: 0.9666666666666667","title":"Example 6 -- Feature Selection with Fixed Train/Validation Splits"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-7-using-the-selected-feature-subset-for-making-new-predictions","text":"# Initialize the dataset from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=1) knn = KNeighborsClassifier(n_neighbors=4) # Select the \"best\" three features via # 5-fold cross-validation on the training set. 
from mlxtend.feature_selection import SequentialFeatureSelector as SFS sfs1 = SFS(knn, k_features=3, forward=True, floating=False, scoring='accuracy', cv=5) sfs1 = sfs1.fit(X_train, y_train) print('Selected features:', sfs1.k_feature_idx_) Selected features: (1, 2, 3) # Generate the new subsets based on the selected features # Note that the transform call is equivalent to # X_train[:, sfs1.k_feature_idx_] X_train_sfs = sfs1.transform(X_train) X_test_sfs = sfs1.transform(X_test) # Fit the estimator using the new feature subset # and make a prediction on the test data knn.fit(X_train_sfs, y_train) y_pred = knn.predict(X_test_sfs) # Compute the accuracy of the prediction acc = float((y_test == y_pred).sum()) / y_pred.shape[0] print('Test set accuracy: %.2f %%' % (acc * 100)) Test set accuracy: 96.00 %
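As a quick sanity check, we can verify that the transform call indeed matches plain NumPy column indexing with the selected feature indices (a minimal sketch, reusing the fitted sfs1 and the arrays from above):

import numpy as np

# transform should return exactly the columns listed in k_feature_idx_
assert np.array_equal(X_train_sfs, X_train[:, sfs1.k_feature_idx_])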
","title":"Example 7 -- Using the Selected Feature Subset For Making New Predictions"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-8-sequential-feature-selection-and-gridsearch","text":"# Initialize the dataset from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=1) Use scikit-learn's GridSearch to tune the hyperparameters inside and outside the SequentialFeatureSelector : from sklearn.model_selection import GridSearchCV from sklearn.pipeline import Pipeline from mlxtend.feature_selection import SequentialFeatureSelector as SFS import mlxtend knn = KNeighborsClassifier(n_neighbors=2) sfs1 = SFS(estimator=knn, k_features=3, forward=True, floating=False, scoring='accuracy', cv=5) pipe = Pipeline([('sfs', sfs1), ('knn', knn)]) param_grid = [ {'sfs__k_features': [1, 2, 3, 4], 'sfs__estimator__n_neighbors': [1, 2, 3, 4]} ] gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', n_jobs=1, cv=5, refit=False) # run grid search gs = gs.fit(X_train, y_train) ... and the \"best\" parameters determined by GridSearch are ... print(\"Best parameters via GridSearch\", gs.best_params_) Best parameters via GridSearch {'sfs__estimator__n_neighbors': 1, 'sfs__k_features': 3}","title":"Example 8 -- Sequential Feature Selection and GridSearch"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#obtaining-the-best-k-feature-indices-after-gridsearch","text":"If we are interested in the best k feature indices via SequentialFeatureSelector.k_feature_idx_ , we have to initialize a GridSearchCV object with refit=True . Now, the grid search object will take the complete training dataset and the best parameters, which it found via cross-validation, to train the estimator pipeline. gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', n_jobs=1, cv=5, refit=True) gs = gs.fit(X_train, y_train) After running the grid search, we can access the individual pipeline objects of the best_estimator_ via the steps attribute. gs.best_estimator_.steps [('sfs', SequentialFeatureSelector(clone_estimator=True, cv=5, estimator=KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=1, n_neighbors=1, p=2, weights='uniform'), floating=False, forward=True, k_features=3, n_jobs=1, pre_dispatch='2*n_jobs', scoring='accuracy', verbose=0)), ('knn', KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=1, n_neighbors=2, p=2, weights='uniform'))] Via sub-indexing, we can then obtain the best-selected feature subset: print('Best features:', gs.best_estimator_.steps[0][1].k_feature_idx_) Best features: (0, 1, 3) During cross-validation, this feature combination had a CV accuracy of: print('Best score:', gs.best_score_) Best score: 0.94 gs.best_params_ {'sfs__estimator__n_neighbors': 1, 'sfs__k_features': 3} Alternatively, we can set the \"best grid search parameters\" in our pipeline manually if we ran GridSearchCV with refit=False . It should yield the same results: pipe.set_params(**gs.best_params_).fit(X_train, y_train) print('Best features:', pipe.steps[0][1].k_feature_idx_) Best features: (0, 1, 3)","title":"Obtaining the best k feature indices after GridSearch"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-9-selecting-the-best-feature-combination-in-a-k-range","text":"If k_features is set to a tuple (min_k, max_k) (new in 0.4.2), the SFS will now select the best feature combination that it discovered by iterating from k=1 to max_k (forward), or max_k to min_k (backward). The size of the returned feature subset is then between min_k and max_k , depending on which combination scored best during cross-validation. X.shape (150, 4) from mlxtend.feature_selection import SequentialFeatureSelector as SFS from sklearn.neighbors import KNeighborsClassifier from mlxtend.data import wine_data from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.pipeline import make_pipeline X, y = wine_data() X_train, X_test, y_train, y_test= train_test_split(X, y, stratify=y, test_size=0.3, random_state=1) knn = KNeighborsClassifier(n_neighbors=2) sfs1 = SFS(estimator=knn, k_features=(3, 10), forward=True, floating=False, scoring='accuracy', cv=5) pipe = make_pipeline(StandardScaler(), sfs1) pipe.fit(X_train, y_train) print('best combination (ACC: %.3f): %s\\n' % (sfs1.k_score_, sfs1.k_feature_idx_)) print('all subsets:\\n', sfs1.subsets_) plot_sfs(sfs1.get_metric_dict(), kind='std_err'); best combination (ACC: 0.992): (0, 1, 2, 3, 6, 8, 9, 10, 11, 12) all subsets: {1: {'feature_idx': (6,), 'cv_scores': array([ 0.84615385, 0.6 , 0.88 , 0.79166667, 0.875 ]), 'avg_score': 0.7985641025641026, 'feature_names': ('6',)}, 2: {'feature_idx': (6, 9), 'cv_scores': array([ 0.92307692, 0.88 , 1. , 0.95833333, 0.91666667]), 'avg_score': 0.93561538461538463, 'feature_names': ('6', '9')}, 3: {'feature_idx': (6, 9, 12), 'cv_scores': array([ 0.92307692, 0.92 , 0.96 , 1. , 0.95833333]), 'avg_score': 0.95228205128205123, 'feature_names': ('6', '9', '12')}, 4: {'feature_idx': (3, 6, 9, 12), 'cv_scores': array([ 0.96153846, 0.96 , 0.96 , 1. , 0.95833333]), 'avg_score': 0.96797435897435891, 'feature_names': ('3', '6', '9', '12')}, 5: {'feature_idx': (3, 6, 9, 10, 12), 'cv_scores': array([ 0.92307692, 0.96 , 1. , 1. , 1.
]), 'avg_score': 0.97661538461538466, 'feature_names': ('3', '6', '9', '10', '12')}, 6: {'feature_idx': (2, 3, 6, 9, 10, 12), 'cv_scores': array([ 0.92307692, 0.96 , 1. , 0.95833333, 1. ]), 'avg_score': 0.96828205128205125, 'feature_names': ('2', '3', '6', '9', '10', '12')}, 7: {'feature_idx': (0, 2, 3, 6, 9, 10, 12), 'cv_scores': array([ 0.92307692, 0.92 , 1. , 1. , 1. ]), 'avg_score': 0.96861538461538466, 'feature_names': ('0', '2', '3', '6', '9', '10', '12')}, 8: {'feature_idx': (0, 2, 3, 6, 8, 9, 10, 12), 'cv_scores': array([ 1. , 0.92, 1. , 1. , 1. ]), 'avg_score': 0.98399999999999999, 'feature_names': ('0', '2', '3', '6', '8', '9', '10', '12')}, 9: {'feature_idx': (0, 2, 3, 6, 8, 9, 10, 11, 12), 'cv_scores': array([ 1. , 0.92, 1. , 1. , 1. ]), 'avg_score': 0.98399999999999999, 'feature_names': ('0', '2', '3', '6', '8', '9', '10', '11', '12')}, 10: {'feature_idx': (0, 1, 2, 3, 6, 8, 9, 10, 11, 12), 'cv_scores': array([ 1. , 0.96, 1. , 1. , 1. ]), 'avg_score': 0.99199999999999999, 'feature_names': ('0', '1', '2', '3', '6', '8', '9', '10', '11', '12')}}","title":"Example 9 -- Selecting the \"best\" feature combination in a k-range"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-10-using-other-cross-validation-schemes","text":"In addition to standard k-fold and stratified k-fold, other cross validation schemes can be used with SequentialFeatureSelector . For example, GroupKFold or LeaveOneOut cross-validation from scikit-learn.","title":"Example 10 -- Using other cross-validation schemes"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#using-groupkfold-with-sequentialfeatureselector","text":"from mlxtend.feature_selection import SequentialFeatureSelector as SFS from sklearn.neighbors import KNeighborsClassifier from mlxtend.data import iris_data from sklearn.model_selection import GroupKFold import numpy as np X, y = iris_data() groups = np.arange(len(y)) // 10 print('groups: {}'.format(groups)) groups: [ 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5 5 6 6 6 6 6 6 6 6 6 6 7 7 7 7 7 7 7 7 7 7 8 8 8 8 8 8 8 8 8 8 9 9 9 9 9 9 9 9 9 9 10 10 10 10 10 10 10 10 10 10 11 11 11 11 11 11 11 11 11 11 12 12 12 12 12 12 12 12 12 12 13 13 13 13 13 13 13 13 13 13 14 14 14 14 14 14 14 14 14 14] Calling the split() method of a scikit-learn cross-validator object will return a generator that yields train, test splits. cv_gen = GroupKFold(4).split(X, y, groups) cv_gen The cv parameter of SequentialFeatureSelector must be either an int or an iterable yielding train, test splits. This iterable can be constructed by passing the train, test split generator to the built-in list() function. cv = list(cv_gen) knn = KNeighborsClassifier(n_neighbors=2) sfs = SFS(estimator=knn, k_features=2, scoring='accuracy', cv=cv) sfs.fit(X, y) print('best combination (ACC: %.3f): %s\\n' % (sfs.k_score_, sfs.k_feature_idx_)) best combination (ACC: 0.940): (2, 3)","title":"Using GroupKFold with SequentialFeatureSelector"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-11-working-with-pandas-dataframes","text":"","title":"Example 11 - Working with pandas DataFrames"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-12-using-pandas-dataframes","text":"Optionally, we can also use pandas DataFrames and pandas Series as input to the fit function. In this case, the column names of the pandas DataFrame will be used as feature names. 
However, note that if custom_feature_names are provided in the fit function, these custom_feature_names take precedence over the DataFrame column-based feature names. import pandas as pd from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from mlxtend.feature_selection import SequentialFeatureSelector as SFS iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=4) sfs1 = SFS(knn, k_features=3, forward=True, floating=False, scoring='accuracy', cv=0) X_df = pd.DataFrame(X, columns=['sepal len', 'petal len', 'sepal width', 'petal width']) X_df.head() .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } sepal len petal len sepal width petal width 0 5.1 3.5 1.4 0.2 1 4.9 3.0 1.4 0.2 2 4.7 3.2 1.3 0.2 3 4.6 3.1 1.5 0.2 4 5.0 3.6 1.4 0.2 Also, the target array, y , can optionally be cast as a Series: y_series = pd.Series(y) y_series.head() 0 0 1 0 2 0 3 0 4 0 dtype: int64 sfs1 = sfs1.fit(X_df, y_series) Note that the only difference when passing a pandas DataFrame as input is that the sfs1.subsets_ dictionary will now contain a 'feature_names' entry holding the corresponding column names: sfs1.subsets_ {1: {'avg_score': 0.95999999999999996, 'cv_scores': array([ 0.96]), 'feature_idx': (3,), 'feature_names': ('petal width',)}, 2: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (2, 3), 'feature_names': ('sepal width', 'petal width')}, 3: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (1, 2, 3), 'feature_names': ('petal len', 'sepal width', 'petal width')}} In mlxtend version >= 0.13, pandas DataFrames are supported as feature inputs to the SequentialFeatureSelector in addition to NumPy arrays or other NumPy-like array types.
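To illustrate the precedence rule mentioned at the beginning of this example, here is a minimal sketch (the tuple entries are hypothetical names chosen only for illustration) that overrides the DataFrame column names during fitting:

sfs1 = sfs1.fit(X_df, y_series, custom_feature_names=('f0', 'f1', 'f2', 'f3'))
# the selected subset is now reported under the custom names
print(sfs1.k_feature_names_)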
","title":"Example 12 - Using Pandas DataFrames"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#api","text":"SequentialFeatureSelector(estimator, k_features=1, forward=True, floating=False, verbose=0, scoring=None, cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Sequential Feature Selection for Classification and Regression. Parameters estimator : scikit-learn classifier or regressor k_features : int or tuple or str (default: 1) Number of features to select, where k_features < the full feature set. New in 0.4.2: A tuple containing a min and max value can be provided, and the SFS will return any feature combination between min and max that scored highest in cross-validation. For example, the tuple (1, 4) will return any combination from 1 up to 4 features instead of a fixed number of features k. New in 0.8.0: A string argument \"best\" or \"parsimonious\". If \"best\" is provided, the feature selector will return the feature subset with the best cross-validation performance. If \"parsimonious\" is provided as an argument, the smallest feature subset that is within one standard error of the cross-validation performance will be selected. forward : bool (default: True) Forward selection if True, backward selection otherwise floating : bool (default: False) Adds a conditional exclusion/inclusion if True. verbose : int (default: 0), level of verbosity to use in logging. If 0, no output, if 1 number of features in current set, if 2 detailed logging including timestamp and cv scores at step. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. cv : int (default: 5) Integer or iterable yielding train, test splits. If cv is an integer and estimator is a classifier (or y consists of integer class labels) stratified k-fold. Otherwise regular k-fold cross-validation is performed. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1. Attributes k_feature_idx_ : array-like, shape = [n_predictions] Feature indices of the selected feature subsets. k_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. New in v 0.13.0. k_score_ : float Cross-validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the sequential selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. The 'feature_names' entry is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/SequentialFeatureSelector/
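Since scoring also accepts a callable, here is a minimal sketch of a custom scorer that conforms to the scorer(estimator, X, y) signature described above (macro-averaged F1 is an arbitrary choice for illustration):

from sklearn.metrics import f1_score

def macro_f1_scorer(estimator, X, y):
    # score the estimator, fitted on the current candidate feature subset,
    # on the held-out data
    y_pred = estimator.predict(X)
    return f1_score(y, y_pred, average='macro')

sfs = SFS(knn, k_features=3, scoring=macro_f1_scorer, cv=5)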
","title":"API"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#methods","text":"fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: pandas DataFrames are now also accepted as argument for y. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data then reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: a pandas Series is now also accepted as argument for y. fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns Reduced feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': average of the CV scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Reduced feature subset of X, shape={n_samples, k_features}
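For instance, a short sketch (assuming a fitted selector such as sfs1 from the examples above) that inspects the metric dictionary as a pandas DataFrame sorted by average CV score:

import pandas as pd

metrics = pd.DataFrame.from_dict(sfs1.get_metric_dict()).T
# best-scoring subset sizes first
print(metrics.sort_values('avg_score', ascending=False))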
","title":"Methods"},{"location":"user_guide/file_io/find_filegroups/","text":"Find Filegroups A function that finds files that belong together (i.e., differ only by file extension) in different directories and collects them in a Python dictionary for further processing tasks. from mlxtend.file_io import find_filegroups Overview This function finds files that are related to each other based on their file names. This can be useful for parsing collections of files that have been stored in different subdirectories, for example: input_dir/ task01.txt task02.txt ... log_dir/ task01.log task02.log ... output_dir/ task01.dat task02.dat ... References - Example 1 - Grouping related files in a dictionary Given the following directory and file structure dir_1/ file_1.log file_2.log file_3.log dir_2/ file_1.csv file_2.csv file_3.csv dir_3/ file_1.txt file_2.txt file_3.txt we can use find_filegroups to group related files as items of a dictionary as shown below: from mlxtend.file_io import find_filegroups find_filegroups(paths=['./data_find_filegroups/dir_1', './data_find_filegroups/dir_2', './data_find_filegroups/dir_3'], substring='file_') {'file_1': ['./data_find_filegroups/dir_1/file_1.log', './data_find_filegroups/dir_2/file_1.csv', './data_find_filegroups/dir_3/file_1.txt'], 'file_2': ['./data_find_filegroups/dir_1/file_2.log', './data_find_filegroups/dir_2/file_2.csv', './data_find_filegroups/dir_3/file_2.txt'], 'file_3': ['./data_find_filegroups/dir_1/file_3.log', './data_find_filegroups/dir_2/file_3.csv', './data_find_filegroups/dir_3/file_3.txt']} API find_filegroups(paths, substring='', extensions=None, validity_check=True, ignore_invisible=True, rstrip='', ignore_substring=None) Find and collect files from different directories in a Python dictionary. Parameters paths : list Paths of the directories to be searched. Dictionary keys are built from the first directory. substring : str (default: '') Substring that all files have to contain to be considered. extensions : list (default: None) None or list of allowed file extensions for each path. If provided, the number of extensions must match the number of paths . validity_check : bool (default: True) If True , checks if all dictionary values have the same number of file paths. Prints a warning and returns an empty dictionary if the validity check failed. ignore_invisible : bool (default: True) If True , ignores invisible files (i.e., files starting with a period). rstrip : str (default: '') If provided, strips characters from right side of the file base names after splitting the extension. Useful to trim different filenames to a common stem. E.g., \"abc_d.txt\" and \"abc_d_.csv\" would share the stem \"abc_d\" if rstrip is set to \"_\". ignore_substring : str (default: None) Ignores files that contain the specified substring. Returns groups : dict Dictionary of file paths. Keys are the file names found in the first directory listed in paths (without file extension). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_filegroups/
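For instance, a minimal sketch (reusing the directory layout from the example above) that additionally restricts each directory to a single allowed file extension via the extensions parameter:

find_filegroups(paths=['./data_find_filegroups/dir_1',
                       './data_find_filegroups/dir_2',
                       './data_find_filegroups/dir_3'],
                substring='file_',
                extensions=['.log', '.csv', '.txt'])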
","title":"Find Filegroups"},{"location":"user_guide/file_io/find_filegroups/#find-filegroups","text":"A function that finds files that belong together (i.e., differ only by file extension) in different directories and collects them in a Python dictionary for further processing tasks. from mlxtend.file_io import find_filegroups","title":"Find Filegroups"},{"location":"user_guide/file_io/find_filegroups/#overview","text":"This function finds files that are related to each other based on their file names. This can be useful for parsing collections of files that have been stored in different subdirectories, for example: input_dir/ task01.txt task02.txt ... log_dir/ task01.log task02.log ... output_dir/ task01.dat task02.dat ...","title":"Overview"},{"location":"user_guide/file_io/find_filegroups/#references","text":"-","title":"References"},{"location":"user_guide/file_io/find_filegroups/#example-1-grouping-related-files-in-a-dictionary","text":"Given the following directory and file structure dir_1/ file_1.log file_2.log file_3.log dir_2/ file_1.csv file_2.csv file_3.csv dir_3/ file_1.txt file_2.txt file_3.txt we can use find_filegroups to group related files as items of a dictionary as shown below: from mlxtend.file_io import find_filegroups find_filegroups(paths=['./data_find_filegroups/dir_1', './data_find_filegroups/dir_2', './data_find_filegroups/dir_3'], substring='file_') {'file_1': ['./data_find_filegroups/dir_1/file_1.log', './data_find_filegroups/dir_2/file_1.csv', './data_find_filegroups/dir_3/file_1.txt'], 'file_2': ['./data_find_filegroups/dir_1/file_2.log', './data_find_filegroups/dir_2/file_2.csv', './data_find_filegroups/dir_3/file_2.txt'], 'file_3': ['./data_find_filegroups/dir_1/file_3.log', './data_find_filegroups/dir_2/file_3.csv', './data_find_filegroups/dir_3/file_3.txt']}","title":"Example 1 - Grouping related files in a dictionary"},{"location":"user_guide/file_io/find_filegroups/#api","text":"find_filegroups(paths, substring='', extensions=None, validity_check=True, ignore_invisible=True, rstrip='', ignore_substring=None) Find and collect files from different directories in a Python dictionary. Parameters paths : list Paths of the directories to be searched. Dictionary keys are built from the first directory. substring : str (default: '') Substring that all files have to contain to be considered. extensions : list (default: None) None or list of allowed file extensions for each path. If provided, the number of extensions must match the number of paths . validity_check : bool (default: True) If True , checks if all dictionary values have the same number of file paths. Prints a warning and returns an empty dictionary if the validity check failed. ignore_invisible : bool (default: True) If True , ignores invisible files (i.e., files starting with a period). rstrip : str (default: '') If provided, strips characters from right side of the file base names after splitting the extension. Useful to trim different filenames to a common stem. E.g., \"abc_d.txt\" and \"abc_d_.csv\" would share the stem \"abc_d\" if rstrip is set to \"_\". ignore_substring : str (default: None) Ignores files that contain the specified substring. Returns groups : dict Dictionary of file paths. Keys are the file names found in the first directory listed in paths (without file extension). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_filegroups/","title":"API"},{"location":"user_guide/file_io/find_files/","text":"Find Files A function that finds files in a given directory based on substring matches and returns a list of the file names found. from mlxtend.file_io import find_files Overview This function finds files based on substring search. This is especially useful if we want to find specific files in a directory tree and return their absolute paths for further processing in Python.
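For instance, a minimal sketch (using the same directory layout as the example below) that restricts the matches to a single file extension via the check_ext parameter:

from mlxtend.file_io import find_files
find_files(substring='file_', path='./data_find_filegroups/',
           recursive=True, check_ext='.csv')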
References - Example 1 - Grouping related files in a dictionary Given the following directory and file structure dir_1/ file_1.log file_2.log file_3.log dir_2/ file_1.csv file_2.csv file_3.csv dir_3/ file_1.txt file_2.txt file_3.txt we can use find_files to return the paths to all files that contain the substring _2 as follows: from mlxtend.file_io import find_files find_files(substring='_2', path='./data_find_filegroups/', recursive=True) ['./data_find_filegroups/dir_1/file_2.log', './data_find_filegroups/dir_2/file_2.csv', './data_find_filegroups/dir_3/file_2.txt'] API find_files(substring, path, recursive=False, check_ext=None, ignore_invisible=True, ignore_substring=None) Find files in a directory based on substring matching. Parameters substring : str Substring of the file to be matched. path : str Path where to look. recursive : bool If true, searches subdirectories recursively. check_ext : str If string (e.g., '.txt'), only returns files that match the specified file extension. ignore_invisible : bool If True , ignores invisible files (i.e., files starting with a period). ignore_substring : str Ignores files that contain the specified substring. Returns results : list List of the matched files. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_files/","title":"Find Files"},{"location":"user_guide/file_io/find_files/#find-files","text":"A function that finds files in a given directory based on substring matches and returns a list of the file names found. from mlxtend.file_io import find_files","title":"Find Files"},{"location":"user_guide/file_io/find_files/#overview","text":"This function finds files based on substring search. This is especially useful if we want to find specific files in a directory tree and return their absolute paths for further processing in Python.","title":"Overview"},{"location":"user_guide/file_io/find_files/#references","text":"-","title":"References"},{"location":"user_guide/file_io/find_files/#example-1-grouping-related-files-in-a-dictionary","text":"Given the following directory and file structure dir_1/ file_1.log file_2.log file_3.log dir_2/ file_1.csv file_2.csv file_3.csv dir_3/ file_1.txt file_2.txt file_3.txt we can use find_files to return the paths to all files that contain the substring _2 as follows: from mlxtend.file_io import find_files find_files(substring='_2', path='./data_find_filegroups/', recursive=True) ['./data_find_filegroups/dir_1/file_2.log', './data_find_filegroups/dir_2/file_2.csv', './data_find_filegroups/dir_3/file_2.txt']","title":"Example 1 - Grouping related files in a dictionary"},{"location":"user_guide/file_io/find_files/#api","text":"find_files(substring, path, recursive=False, check_ext=None, ignore_invisible=True, ignore_substring=None) Find files in a directory based on substring matching. Parameters substring : str Substring of the file to be matched. path : str Path where to look. recursive : bool If true, searches subdirectories recursively. check_ext : str If string (e.g., '.txt'), only returns files that match the specified file extension. ignore_invisible : bool If True , ignores invisible files (i.e., files starting with a period). ignore_substring : str Ignores files that contain the specified substring. Returns results : list List of the matched files. 
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_files/","title":"API"},{"location":"user_guide/frequent_patterns/apriori/","text":"Frequent Itemsets via Apriori Algorithm Apriori function to extract frequent itemsets for association rule mining from mlxtend.frequent_patterns import apriori Overview Apriori is a popular algorithm [1] for extracting frequent itemsets with applications in association rule learning. The apriori algorithm has been designed to operate on databases containing transactions, such as purchases by customers of a store. An itemset is considered as \"frequent\" if it meets a user-specified support threshold. For instance, if the support threshold is set to 0.5 (50%), a frequent itemset is defined as a set of items that occur together in at least 50% of all transactions in the database. References [1] Agrawal, Rakesh, and Ramakrishnan Srikant. \" Fast algorithms for mining association rules .\" Proc. 20th int. conf. very large data bases, VLDB. Vol. 1215. 1994. Example 1 -- Generating Frequent Itemsets The apriori function expects data in a one-hot encoded pandas DataFrame. Suppose we have the following transaction data: dataset = [['Milk', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'], ['Dill', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'], ['Milk', 'Apple', 'Kidney Beans', 'Eggs'], ['Milk', 'Unicorn', 'Corn', 'Kidney Beans', 'Yogurt'], ['Corn', 'Onion', 'Onion', 'Kidney Beans', 'Ice cream', 'Eggs']] We can transform it into the right format via the TransactionEncoder as follows: import pandas as pd from mlxtend.preprocessing import TransactionEncoder te = TransactionEncoder() te_ary = te.fit(dataset).transform(dataset) df = pd.DataFrame(te_ary, columns=te.columns_) df .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } Apple Corn Dill Eggs Ice cream Kidney Beans Milk Nutmeg Onion Unicorn Yogurt 0 False False False True False True True True True False True 1 False False True True False True False True True False True 2 True False False True False True True False False False False 3 False True False False False True True False False True True 4 False True False True True True False False True False False Now, let us return the items and itemsets with at least 60% support: from mlxtend.frequent_patterns import apriori apriori(df, min_support=0.6) .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } support itemsets 0 0.8 (3) 1 1.0 (5) 2 0.6 (6) 3 0.6 (8) 4 0.6 (10) 5 0.8 (3, 5) 6 0.6 (8, 3) 7 0.6 (5, 6) 8 0.6 (8, 5) 9 0.6 (10, 5) 10 0.6 (8, 3, 5) By default, apriori returns the column indices of the items, which may be useful in downstream operations such as association rule mining. 
For better readability, we can set use_colnames=True to convert these integer values into the respective item names: apriori(df, min_support=0.6, use_colnames=True) .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } support itemsets 0 0.8 (Eggs) 1 1.0 (Kidney Beans) 2 0.6 (Milk) 3 0.6 (Onion) 4 0.6 (Yogurt) 5 0.8 (Eggs, Kidney Beans) 6 0.6 (Onion, Eggs) 7 0.6 (Milk, Kidney Beans) 8 0.6 (Onion, Kidney Beans) 9 0.6 (Kidney Beans, Yogurt) 10 0.6 (Onion, Eggs, Kidney Beans) Example 2 -- Selecting and Filtering Results The advantage of working with pandas DataFrames is that we can use its convenient features to filter the results. For instance, let's assume we are only interested in itemsets of length 2 that have a support of at least 80 percent. First, we create the frequent itemsets via apriori and add a new column that stores the length of each itemset: frequent_itemsets = apriori(df, min_support=0.6, use_colnames=True) frequent_itemsets['length'] = frequent_itemsets['itemsets'].apply(lambda x: len(x)) frequent_itemsets .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } support itemsets length 0 0.8 (Eggs) 1 1 1.0 (Kidney Beans) 1 2 0.6 (Milk) 1 3 0.6 (Onion) 1 4 0.6 (Yogurt) 1 5 0.8 (Eggs, Kidney Beans) 2 6 0.6 (Onion, Eggs) 2 7 0.6 (Milk, Kidney Beans) 2 8 0.6 (Onion, Kidney Beans) 2 9 0.6 (Kidney Beans, Yogurt) 2 10 0.6 (Onion, Eggs, Kidney Beans) 3 Then, we can select the results that satisfy our desired criteria as follows: frequent_itemsets[ (frequent_itemsets['length'] == 2) & (frequent_itemsets['support'] >= 0.8) ] .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } support itemsets length 5 0.8 (Eggs, Kidney Beans) 2 Similarly, using the Pandas API, we can select entries based on the \"itemsets\" column: frequent_itemsets[ frequent_itemsets['itemsets'] == {'Onion', 'Eggs'} ] .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } support itemsets length 6 0.6 (Onion, Eggs) 2 Frozensets Note that the entries in the \"itemsets\" column are of type frozenset , which is a built-in Python type that is similar to a Python set but immutable, which makes it more efficient for certain query or comparison operations (https://docs.python.org/3.6/library/stdtypes.html#frozenset). Since frozenset s are sets, the item order does not matter. I.e., the query frequent_itemsets[ frequent_itemsets['itemsets'] == {'Onion', 'Eggs'} ] is equivalent to any of the following three frequent_itemsets[ frequent_itemsets['itemsets'] == {'Eggs', 'Onion'} ] frequent_itemsets[ frequent_itemsets['itemsets'] == frozenset(('Eggs', 'Onion')) ] frequent_itemsets[ frequent_itemsets['itemsets'] == frozenset(('Onion', 'Eggs')) ]
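Relatedly, instead of filtering by length after the fact, a minimal sketch that caps the itemset length directly via the max_len parameter (documented in the API section below):

apriori(df, min_support=0.6, use_colnames=True, max_len=2)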
Example 3 -- Working with Sparse Representations To save memory, you may want to represent your transaction data in the sparse format. This is especially useful if you have lots of products and small transactions. oht_ary = te.fit(dataset).transform(dataset, sparse=True) sparse_df = pd.SparseDataFrame(oht_ary, columns=te.columns_, default_fill_value=False) sparse_df .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } Apple Corn Dill Eggs Ice cream Kidney Beans Milk Nutmeg Onion Unicorn Yogurt 0 False False False True False True True True True False True 1 False False True True False True False True True False True 2 True False False True False True True False False False False 3 False True False False False True True False False True True 4 False True False True True True False False True False False apriori(sparse_df, min_support=0.6, use_colnames=True) .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } support itemsets 0 0.8 (Eggs) 1 1.0 (Kidney Beans) 2 0.6 (Milk) 3 0.6 (Onion) 4 0.6 (Yogurt) 5 0.8 (Eggs, Kidney Beans) 6 0.6 (Onion, Eggs) 7 0.6 (Milk, Kidney Beans) 8 0.6 (Onion, Kidney Beans) 9 0.6 (Kidney Beans, Yogurt) 10 0.6 (Onion, Eggs, Kidney Beans) API apriori(df, min_support=0.5, use_colnames=False, max_len=None, n_jobs=1) Get frequent itemsets from a one-hot DataFrame Parameters df : pandas DataFrame or pandas SparseDataFrame pandas DataFrame in the encoded format. The allowed values are either 0/1 or True/False. For example, Apple Bananas Beer Chicken Milk Rice 0 1 0 1 1 0 1 1 1 0 1 0 0 1 2 1 0 1 0 0 0 3 1 1 0 0 0 0 4 0 0 1 1 1 1 5 0 0 1 0 1 1 6 0 0 1 0 1 0 7 1 1 0 0 0 0 min_support : float (default: 0.5) A float between 0 and 1 for minimum support of the itemsets returned. The support is computed as the fraction transactions_where_item(s)_occur / total_transactions. use_colnames : bool (default: False) If true, uses the DataFrames' column names in the returned DataFrame instead of column indices. max_len : int (default: None) Maximum length of the itemsets generated. If None (default) all possible itemsets lengths (under the apriori condition) are evaluated. Returns pandas DataFrame with columns ['support', 'itemsets'] of all itemsets with a support >= min_support and a length <= max_len (if max_len is not None). Each itemset in the 'itemsets' column is of type frozenset , which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/apriori/","title":"Apriori"},{"location":"user_guide/frequent_patterns/apriori/#frequent-itemsets-via-apriori-algorithm","text":"Apriori function to extract frequent itemsets for association rule mining from mlxtend.frequent_patterns import apriori","title":"Frequent Itemsets via Apriori Algorithm"},{"location":"user_guide/frequent_patterns/apriori/#overview","text":"Apriori is a popular algorithm [1] for extracting frequent itemsets with applications in association rule learning. The apriori algorithm has been designed to operate on databases containing transactions, such as purchases by customers of a store. An itemset is considered as \"frequent\" if it meets a user-specified support threshold.
For instance, if the support threshold is set to 0.5 (50%), a frequent itemset is defined as a set of items that occur together in at least 50% of all transactions in the database.","title":"Overview"},{"location":"user_guide/frequent_patterns/apriori/#references","text":"[1] Agrawal, Rakesh, and Ramakrishnan Srikant. \" Fast algorithms for mining association rules .\" Proc. 20th int. conf. very large data bases, VLDB. Vol. 1215. 1994.","title":"References"},{"location":"user_guide/frequent_patterns/apriori/#example-1-generating-frequent-itemsets","text":"The apriori function expects data in a one-hot encoded pandas DataFrame. Suppose we have the following transaction data: dataset = [['Milk', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'], ['Dill', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'], ['Milk', 'Apple', 'Kidney Beans', 'Eggs'], ['Milk', 'Unicorn', 'Corn', 'Kidney Beans', 'Yogurt'], ['Corn', 'Onion', 'Onion', 'Kidney Beans', 'Ice cream', 'Eggs']] We can transform it into the right format via the TransactionEncoder as follows: import pandas as pd from mlxtend.preprocessing import TransactionEncoder te = TransactionEncoder() te_ary = te.fit(dataset).transform(dataset) df = pd.DataFrame(te_ary, columns=te.columns_) df .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } Apple Corn Dill Eggs Ice cream Kidney Beans Milk Nutmeg Onion Unicorn Yogurt 0 False False False True False True True True True False True 1 False False True True False True False True True False True 2 True False False True False True True False False False False 3 False True False False False True True False False True True 4 False True False True True True False False True False False Now, let us return the items and itemsets with at least 60% support: from mlxtend.frequent_patterns import apriori apriori(df, min_support=0.6) .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } support itemsets 0 0.8 (3) 1 1.0 (5) 2 0.6 (6) 3 0.6 (8) 4 0.6 (10) 5 0.8 (3, 5) 6 0.6 (8, 3) 7 0.6 (5, 6) 8 0.6 (8, 5) 9 0.6 (10, 5) 10 0.6 (8, 3, 5) By default, apriori returns the column indices of the items, which may be useful in downstream operations such as association rule mining. For better readability, we can set use_colnames=True to convert these integer values into the respective item names: apriori(df, min_support=0.6, use_colnames=True) .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } support itemsets 0 0.8 (Eggs) 1 1.0 (Kidney Beans) 2 0.6 (Milk) 3 0.6 (Onion) 4 0.6 (Yogurt) 5 0.8 (Eggs, Kidney Beans) 6 0.6 (Onion, Eggs) 7 0.6 (Milk, Kidney Beans) 8 0.6 (Onion, Kidney Beans) 9 0.6 (Kidney Beans, Yogurt) 10 0.6 (Onion, Eggs, Kidney Beans)","title":"Example 1 -- Generating Frequent Itemsets"},{"location":"user_guide/frequent_patterns/apriori/#example-2-selecting-and-filtering-results","text":"The advantage of working with pandas DataFrames is that we can use its convenient features to filter the results. For instance, let's assume we are only interested in itemsets of length 2 that have a support of at least 80 percent. 
First, we create the frequent itemsets via apriori and add a new column that stores the length of each itemset: frequent_itemsets = apriori(df, min_support=0.6, use_colnames=True) frequent_itemsets['length'] = frequent_itemsets['itemsets'].apply(lambda x: len(x)) frequent_itemsets .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } support itemsets length 0 0.8 (Eggs) 1 1 1.0 (Kidney Beans) 1 2 0.6 (Milk) 1 3 0.6 (Onion) 1 4 0.6 (Yogurt) 1 5 0.8 (Eggs, Kidney Beans) 2 6 0.6 (Onion, Eggs) 2 7 0.6 (Milk, Kidney Beans) 2 8 0.6 (Onion, Kidney Beans) 2 9 0.6 (Kidney Beans, Yogurt) 2 10 0.6 (Onion, Eggs, Kidney Beans) 3 Then, we can select the results that satisfy our desired criteria as follows: frequent_itemsets[ (frequent_itemsets['length'] == 2) & (frequent_itemsets['support'] >= 0.8) ] .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } support itemsets length 5 0.8 (Eggs, Kidney Beans) 2 Similarly, using the Pandas API, we can select entries based on the \"itemsets\" column: frequent_itemsets[ frequent_itemsets['itemsets'] == {'Onion', 'Eggs'} ] .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } support itemsets length 6 0.6 (Onion, Eggs) 2 Frozensets Note that the entries in the \"itemsets\" column are of type frozenset , which is a built-in Python type that is similar to a Python set but immutable, which makes it more efficient for certain query or comparison operations (https://docs.python.org/3.6/library/stdtypes.html#frozenset). Since frozenset s are sets, the item order does not matter. I.e., the query frequent_itemsets[ frequent_itemsets['itemsets'] == {'Onion', 'Eggs'} ] is equivalent to any of the following three frequent_itemsets[ frequent_itemsets['itemsets'] == {'Eggs', 'Onion'} ] frequent_itemsets[ frequent_itemsets['itemsets'] == frozenset(('Eggs', 'Onion')) ] frequent_itemsets[ frequent_itemsets['itemsets'] == frozenset(('Onion', 'Eggs')) ]","title":"Example 2 -- Selecting and Filtering Results"},{"location":"user_guide/frequent_patterns/apriori/#example-3-working-with-sparse-representations","text":"To save memory, you may want to represent your transaction data in the sparse format. This is especially useful if you have lots of products and small transactions. oht_ary = te.fit(dataset).transform(dataset, sparse=True) sparse_df = pd.SparseDataFrame(oht_ary, columns=te.columns_, default_fill_value=False) sparse_df .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } Apple Corn Dill Eggs Ice cream Kidney Beans Milk Nutmeg Onion Unicorn Yogurt 0 False False False True False True True True True False True 1 False False True True False True False True True False True 2 True False False True False True True False False False False 3 False True False False False True True False False True True 4 False True False True True True False False True False False apriori(sparse_df, min_support=0.6, use_colnames=True) .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } support itemsets 0 0.8 (Eggs) 1 1.0 (Kidney Beans) 2 0.6 (Milk) 3 0.6 (Onion) 4 0.6 (Yogurt) 5 0.8 (Eggs, Kidney Beans) 6 0.6 (Onion, Eggs) 7 0.6 (Milk, Kidney Beans) 8 0.6 (Onion, Kidney Beans) 9 0.6 (Kidney Beans, Yogurt) 10 0.6 (Onion, Eggs, Kidney Beans)","title":"Example 3 -- Working with Sparse Representations"},{"location":"user_guide/frequent_patterns/apriori/#api","text":"apriori(df, min_support=0.5, use_colnames=False, max_len=None, n_jobs=1) Get frequent itemsets from a one-hot DataFrame Parameters df : pandas DataFrame or pandas SparseDataFrame pandas DataFrame in the encoded format. The allowed values are either 0/1 or True/False. For example, Apple Bananas Beer Chicken Milk Rice 0 1 0 1 1 0 1 1 1 0 1 0 0 1 2 1 0 1 0 0 0 3 1 1 0 0 0 0 4 0 0 1 1 1 1 5 0 0 1 0 1 1 6 0 0 1 0 1 0 7 1 1 0 0 0 0 min_support : float (default: 0.5) A float between 0 and 1 for minimum support of the itemsets returned. The support is computed as the fraction transactions_where_item(s)_occur / total_transactions. use_colnames : bool (default: False) If true, uses the DataFrames' column names in the returned DataFrame instead of column indices. max_len : int (default: None) Maximum length of the itemsets generated. If None (default) all possible itemsets lengths (under the apriori condition) are evaluated. Returns pandas DataFrame with columns ['support', 'itemsets'] of all itemsets with a support >= min_support and a length <= max_len (if max_len is not None). Each itemset in the 'itemsets' column is of type frozenset , which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/apriori/","title":"API"},{"location":"user_guide/frequent_patterns/association_rules/","text":"Association Rules Generation from Frequent Itemsets Function to generate association rules from frequent itemsets from mlxtend.frequent_patterns import association_rules Overview Rule generation is a common task in the mining of frequent patterns. An association rule is an implication expression of the form X \rightarrow Y , where X and Y are disjoint itemsets [1]. A more concrete example based on consumer behaviour would be \{Diapers\} \rightarrow \{Beer\} suggesting that people who buy diapers are also likely to buy beer. To evaluate the \"interest\" of such an association rule, different metrics have been developed. The current implementation makes use of the confidence and lift metrics.
Metrics The currently supported metrics for evaluating association rules and setting selection thresholds are listed below. Given a rule \"A -> C\", A stands for antecedent and C stands for consequent. 'support': \text{support}(A\rightarrow C) = \text{support}(A \cup C), \;\;\; \text{range: } [0, 1] introduced in [3] The support metric is defined for itemsets, not association rules. The table produced by the association rule mining algorithm contains three different support metrics: 'antecedent support', 'consequent support', and 'support'. Here, 'antecedent support' computes the proportion of transactions that contain the antecedent A, and 'consequent support' computes the support for the itemset of the consequent C. The 'support' metric then computes the support of the combined itemset A \cup C -- note that 'support' depends on 'antecedent support' and 'consequent support' via min('antecedent support', 'consequent support'). Typically, support is used to measure the abundance or frequency (often interpreted as significance or importance) of an itemset in a database. We refer to an itemset as a \"frequent itemset\" if its support is larger than a specified minimum-support threshold. Note that in general, due to the downward closure property, all subsets of a frequent itemset are also frequent. 'confidence': \text{confidence}(A\rightarrow C) = \frac{\text{support}(A\rightarrow C)}{\text{support}(A)}, \;\;\; \text{range: } [0, 1] introduced in [3] The confidence of a rule A->C is the probability of seeing the consequent in a transaction given that it also contains the antecedent. Note that the metric is not symmetric or directed; for instance, the confidence for A->C is different from the confidence for C->A. The confidence is 1 (maximal) for a rule A->C if the consequent and antecedent always occur together. 'lift': \text{lift}(A\rightarrow C) = \frac{\text{confidence}(A\rightarrow C)}{\text{support}(C)}, \;\;\; \text{range: } [0, \infty] introduced in [4] The lift metric is commonly used to measure how much more often the antecedent and consequent of a rule A->C occur together than we would expect if they were statistically independent. If A and C are independent, the Lift score will be exactly 1. 'leverage': \text{leverage}(A\rightarrow C) = \text{support}(A\rightarrow C) - \text{support}(A) \times \text{support}(C), \;\;\; \text{range: } [-1, 1] introduced in [5] Leverage computes the difference between the observed frequency of A and C appearing together and the frequency that would be expected if A and C were independent. A leverage value of 0 indicates independence. 'conviction': \text{conviction}(A\rightarrow C) = \frac{1 - \text{support}(C)}{1 - \text{confidence}(A\rightarrow C)}, \;\;\; \text{range: } [0, \infty] introduced in [6] A high conviction value means that the consequent is highly dependent on the antecedent. For instance, in the case of a perfect confidence score, the denominator becomes 0 (due to 1 - 1) for which the conviction score is defined as 'inf'. Similar to lift, if items are independent, the conviction is 1.
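As a quick numeric check of these definitions, using the Onion/Eggs rule from the examples below, where support(Onion) = 0.6, support(Eggs) = 0.8, and the combined support(Onion -> Eggs) = 0.6:

confidence = 0.6 / 0.6        # = 1.0
lift = 1.0 / 0.8              # = 1.25
leverage = 0.6 - 0.6 * 0.8    # = 0.12
# conviction = (1 - 0.8) / (1 - 1.0) divides by zero and is reported as inf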
of the ACM SIGMOD Int'l Conference on Management of Data, pages 207-216, Washington D.C., May 1993 [4] S. Brin, R. Motwani, J. D. Ullman, and S. Tsur. Dynamic itemset counting and implication rules for market basket data [5] Piatetsky-Shapiro, G., Discovery, analysis, and presentation of strong rules. Knowledge Discovery in Databases, 1991: p. 229-248. [6] Sergey Brin, Rajeev Motwani, Jeffrey D. Ullman, and Shalom Turk. Dynamic itemset counting and implication rules for market basket data. In SIGMOD 1997, Proceedings ACM SIGMOD International Conference on Management of Data, pages 255-264, Tucson, Arizona, USA, May 1997 Example 1 -- Generating Association Rules from Frequent Itemsets The generate_rules takes dataframes of frequent itemsets as produced by the apriori function in mlxtend.association . To demonstrate the usage of the generate_rules method, we first create a pandas DataFrame of frequent itemsets as generated by the apriori function: import pandas as pd from mlxtend.preprocessing import TransactionEncoder from mlxtend.frequent_patterns import apriori dataset = [['Milk', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'], ['Dill', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'], ['Milk', 'Apple', 'Kidney Beans', 'Eggs'], ['Milk', 'Unicorn', 'Corn', 'Kidney Beans', 'Yogurt'], ['Corn', 'Onion', 'Onion', 'Kidney Beans', 'Ice cream', 'Eggs']] te = TransactionEncoder() te_ary = te.fit(dataset).transform(dataset) df = pd.DataFrame(te_ary, columns=te.columns_) frequent_itemsets = apriori(df, min_support=0.6, use_colnames=True) frequent_itemsets .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } support itemsets 0 0.8 (Eggs) 1 1.0 (Kidney Beans) 2 0.6 (Milk) 3 0.6 (Onion) 4 0.6 (Yogurt) 5 0.8 (Kidney Beans, Eggs) 6 0.6 (Onion, Eggs) 7 0.6 (Milk, Kidney Beans) 8 0.6 (Onion, Kidney Beans) 9 0.6 (Kidney Beans, Yogurt) 10 0.6 (Onion, Kidney Beans, Eggs) The generate_rules() function allows you to (1) specify your metric of interest and (2) the according threshold. Currently implemented measures are confidence and lift . 
Let's say you are interested in rules derived from the frequent itemsets only if the level of confidence is above the 70 percent threshold (min_threshold=0.7):

from mlxtend.frequent_patterns import association_rules
association_rules(frequent_itemsets, metric="confidence", min_threshold=0.7)

    antecedents            consequents            antecedent support  consequent support  support  confidence  lift  leverage  conviction
0   (Kidney Beans)         (Eggs)                 1.0  0.8  0.8  0.80  1.00  0.00  1.000000
1   (Eggs)                 (Kidney Beans)         0.8  1.0  0.8  1.00  1.00  0.00  inf
2   (Onion)                (Eggs)                 0.6  0.8  0.6  1.00  1.25  0.12  inf
3   (Eggs)                 (Onion)                0.8  0.6  0.6  0.75  1.25  0.12  1.600000
4   (Milk)                 (Kidney Beans)         0.6  1.0  0.6  1.00  1.00  0.00  inf
5   (Onion)                (Kidney Beans)         0.6  1.0  0.6  1.00  1.00  0.00  inf
6   (Yogurt)               (Kidney Beans)         0.6  1.0  0.6  1.00  1.00  0.00  inf
7   (Onion, Kidney Beans)  (Eggs)                 0.6  0.8  0.6  1.00  1.25  0.12  inf
8   (Onion, Eggs)          (Kidney Beans)         0.6  1.0  0.6  1.00  1.00  0.00  inf
9   (Kidney Beans, Eggs)   (Onion)                0.8  0.6  0.6  0.75  1.25  0.12  1.600000
10  (Onion)                (Kidney Beans, Eggs)   0.6  0.8  0.6  1.00  1.25  0.12  inf
11  (Eggs)                 (Onion, Kidney Beans)  0.8  0.6  0.6  0.75  1.25  0.12  1.600000

Example 2 -- Rule Generation and Selection Criteria

If you are interested in rules according to a different metric of interest, you can simply adjust the metric and min_threshold arguments. For example, if you are only interested in rules that have a lift score of >= 1.2, you would do the following:

rules = association_rules(frequent_itemsets, metric="lift", min_threshold=1.2)
rules

    antecedents            consequents            antecedent support  consequent support  support  confidence  lift  leverage  conviction
0   (Onion)                (Eggs)                 0.6  0.8  0.6  1.00  1.25  0.12  inf
1   (Eggs)                 (Onion)                0.8  0.6  0.6  0.75  1.25  0.12  1.600000
2   (Onion, Kidney Beans)  (Eggs)                 0.6  0.8  0.6  1.00  1.25  0.12  inf
3   (Kidney Beans, Eggs)   (Onion)                0.8  0.6  0.6  0.75  1.25  0.12  1.600000
4   (Onion)                (Kidney Beans, Eggs)   0.6  0.8  0.6  1.00  1.25  0.12  inf
5   (Eggs)                 (Onion, Kidney Beans)  0.8  0.6  0.6  0.75  1.25  0.12  1.600000

Pandas DataFrames make it easy to filter the results further.
Let's say we are only interested in rules that satisfy the following criteria:

- at least 2 antecedents
- a confidence > 0.75
- a lift score > 1.2

We could compute the antecedent length as follows:

rules["antecedent_len"] = rules["antecedents"].apply(lambda x: len(x))
rules

    antecedents            consequents            antecedent support  consequent support  support  confidence  lift  leverage  conviction  antecedent_len
0   (Onion)                (Eggs)                 0.6  0.8  0.6  1.00  1.25  0.12  inf       1
1   (Eggs)                 (Onion)                0.8  0.6  0.6  0.75  1.25  0.12  1.600000  1
2   (Onion, Kidney Beans)  (Eggs)                 0.6  0.8  0.6  1.00  1.25  0.12  inf       2
3   (Kidney Beans, Eggs)   (Onion)                0.8  0.6  0.6  0.75  1.25  0.12  1.600000  2
4   (Onion)                (Kidney Beans, Eggs)   0.6  0.8  0.6  1.00  1.25  0.12  inf       1
5   (Eggs)                 (Onion, Kidney Beans)  0.8  0.6  0.6  0.75  1.25  0.12  1.600000  1

Then, we can use pandas' selection syntax as shown below:

rules[(rules['antecedent_len'] >= 2) &
      (rules['confidence'] > 0.75) &
      (rules['lift'] > 1.2)]

    antecedents            consequents  antecedent support  consequent support  support  confidence  lift  leverage  conviction  antecedent_len
2   (Onion, Kidney Beans)  (Eggs)       0.6  0.8  0.6  1.0  1.25  0.12  inf  2

Similarly, using the Pandas API, we can select entries based on the "antecedents" or "consequents" columns:

rules[rules['antecedents'] == {'Eggs', 'Kidney Beans'}]

    antecedents           consequents  antecedent support  consequent support  support  confidence  lift  leverage  conviction  antecedent_len
3   (Kidney Beans, Eggs)  (Onion)      0.8  0.6  0.6  0.75  1.25  0.12  1.6  2

Frozensets

Note that the entries in the "itemsets" column are of type frozenset, a built-in Python type that is similar to a Python set but immutable, which makes it more efficient for certain query or comparison operations (https://docs.python.org/3.6/library/stdtypes.html#frozenset). Since frozensets are sets, the item order does not matter. That is, the query

rules[rules['antecedents'] == {'Eggs', 'Kidney Beans'}]

is equivalent to any of the following three:

rules[rules['antecedents'] == {'Kidney Beans', 'Eggs'}]
rules[rules['antecedents'] == frozenset(('Eggs', 'Kidney Beans'))]
rules[rules['antecedents'] == frozenset(('Kidney Beans', 'Eggs'))]
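Since the entries are frozensets, you can also filter by set membership instead of exact equality. The following is a small sketch using plain pandas (the apply-based pattern is our illustration, not a dedicated mlxtend API):

# select rules whose antecedents contain a given item ...
rules[rules['antecedents'].apply(lambda s: 'Onion' in s)]

# ... or whose antecedents are a superset of a given itemset
rules[rules['antecedents'].apply(lambda s: s.issuperset({'Onion', 'Kidney Beans'}))]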
Example 3 -- Frequent Itemsets with Incomplete Antecedent and Consequent Information

Most metrics computed by association_rules depend on the consequent and antecedent support scores of a given rule, which are provided in the frequent itemset input DataFrame. Consider the following example:

import pandas as pd

d = {'itemsets': [['177', '176'], ['177', '179'], ['176', '178'],
                  ['176', '179'], ['93', '100'], ['177', '178'],
                  ['177', '176', '178']],
     'support': [0.253623, 0.253623, 0.217391,
                 0.217391, 0.181159, 0.108696, 0.108696]}

freq_itemsets = pd.DataFrame(d)
freq_itemsets

    itemsets         support
0   [177, 176]       0.253623
1   [177, 179]       0.253623
2   [176, 178]       0.217391
3   [176, 179]       0.217391
4   [93, 100]        0.181159
5   [177, 178]       0.108696
6   [177, 176, 178]  0.108696

Note that this is a "cropped" DataFrame that doesn't contain the support values of the item subsets. This can create problems if we want to compute the association rule metrics for, e.g., 176 => 177. For example, the confidence is computed as

\text{confidence}(A\rightarrow C) = \frac{\text{support}(A\rightarrow C)}{\text{support}(A)}, \;\;\; \text{range: } [0, 1]

But we do not have \text{support}(A). All we know about "A"'s support is that it is at least 0.253623.

In these scenarios, where not all metrics can be computed due to an incomplete input DataFrame, you can use the support_only=True option, which computes only the support column of a given rule, a metric that does not require this additional information:

\text{support}(A\rightarrow C) = \text{support}(A \cup C), \;\;\; \text{range: } [0, 1]

NaNs will be assigned to all other metric columns:

from mlxtend.frequent_patterns import association_rules
res = association_rules(freq_itemsets, support_only=True, min_threshold=0.1)
res

    antecedents  consequents  antecedent support  consequent support  support   confidence  lift  leverage  conviction
0   (176)        (177)        NaN  NaN  0.253623  NaN  NaN  NaN  NaN
1   (177)        (176)        NaN  NaN  0.253623  NaN  NaN  NaN  NaN
2   (179)        (177)        NaN  NaN  0.253623  NaN  NaN  NaN  NaN
3   (177)        (179)        NaN  NaN  0.253623  NaN  NaN  NaN  NaN
4   (176)        (178)        NaN  NaN  0.217391  NaN  NaN  NaN  NaN
5   (178)        (176)        NaN  NaN  0.217391  NaN  NaN  NaN  NaN
6   (179)        (176)        NaN  NaN  0.217391  NaN  NaN  NaN  NaN
7   (176)        (179)        NaN  NaN  0.217391  NaN  NaN  NaN  NaN
8   (93)         (100)        NaN  NaN  0.181159  NaN  NaN  NaN  NaN
9   (100)        (93)         NaN  NaN  0.181159  NaN  NaN  NaN  NaN
10  (177)        (178)        NaN  NaN  0.108696  NaN  NaN  NaN  NaN
11  (178)        (177)        NaN  NaN  0.108696  NaN  NaN  NaN  NaN
12  (176, 177)   (178)        NaN  NaN  0.108696  NaN  NaN  NaN  NaN
13  (176, 178)   (177)        NaN  NaN  0.108696  NaN  NaN  NaN  NaN
14  (177, 178)   (176)        NaN  NaN  0.108696  NaN  NaN  NaN  NaN
15  (176)        (177, 178)   NaN  NaN  0.108696  NaN  NaN  NaN  NaN
16  (177)        (176, 178)   NaN  NaN  0.108696  NaN  NaN  NaN  NaN
17  (178)        (176, 177)   NaN  NaN  0.108696  NaN  NaN  NaN  NaN

To clean up the representation, you may want to do the following:

res = res[['antecedents', 'consequents', 'support']]
res

    antecedents  consequents  support
0   (176)        (177)        0.253623
1   (177)        (176)        0.253623
2   (179)        (177)        0.253623
3   (177)        (179)        0.253623
4   (176)        (178)        0.217391
5   (178)        (176)        0.217391
6   (179)        (176)        0.217391
7   (176)        (179)        0.217391
8   (93)         (100)        0.181159
9   (100)        (93)         0.181159
10  (177)        (178)        0.108696
11  (178)        (177)        0.108696
12  (176, 177)   (178)        0.108696
13  (176, 178)   (177)        0.108696
14  (177, 178)   (176)        0.108696
15  (176)        (177, 178)   0.108696
16  (177)        (176, 178)   0.108696
17  (178)        (176, 177)   0.108696
API

association_rules(df, metric='confidence', min_threshold=0.8, support_only=False)

Generates a DataFrame of association rules including the metrics 'support', 'confidence', and 'lift'.

Parameters

df : pandas DataFrame
  pandas DataFrame of frequent itemsets with columns ['support', 'itemsets']

metric : string (default: 'confidence')
  Metric to evaluate if a rule is of interest. Automatically set to 'support' if support_only=True. Otherwise, supported metrics are 'support', 'confidence', 'lift', 'leverage', and 'conviction'. These metrics are computed as follows:
  - support(A->C) = support(A+C) [aka 'support'], range: [0, 1]
  - confidence(A->C) = support(A+C) / support(A), range: [0, 1]
  - lift(A->C) = confidence(A->C) / support(C), range: [0, inf]
  - leverage(A->C) = support(A->C) - support(A)*support(C), range: [-1, 1]
  - conviction(A->C) = [1 - support(C)] / [1 - confidence(A->C)], range: [0, inf]

min_threshold : float (default: 0.8)
  Minimal threshold for the evaluation metric, via the metric parameter, to decide whether a candidate rule is of interest.

support_only : bool (default: False)
  Only computes the rule support and fills the other metric columns with NaNs. This is useful if:
  a) the input DataFrame is incomplete, e.g., does not contain support values for all rule antecedents and consequents
  b) you simply want to speed up the computation because you don't need the other metrics.

Returns

pandas DataFrame with columns "antecedents" and "consequents" that store itemsets, plus the scoring metric columns "antecedent support", "consequent support", "support", "confidence", "lift", "leverage", and "conviction" of all rules for which metric(rule) >= min_threshold. Each entry in the "antecedents" and "consequents" columns is of type frozenset, a Python built-in type that behaves similarly to sets except that it is immutable (for more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset).

Examples

For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/association_rules/

Association Rules Generation from Frequent Itemsets

Function to generate association rules from frequent itemsets:

from mlxtend.frequent_patterns import association_rules

Overview

Rule generation is a common task in the mining of frequent patterns. An association rule is an implication expression of the form X \rightarrow Y, where X and Y are disjoint itemsets [1]. A more concrete example based on consumer behaviour would be \{Diapers\} \rightarrow \{Beer\}, suggesting that people who buy diapers are also likely to buy beer. To evaluate the "interest" of such an association rule, different metrics have been developed. The current implementation makes use of the metrics listed in the Metrics section above.
Given a rule \"A -> C\", A stands for antecedent and C stands for consequent.","title":"Metrics"},{"location":"user_guide/frequent_patterns/association_rules/#support","text":"\\text{support}(A\\rightarrow C) = \\text{support}(A \\cup C), \\;\\;\\; \\text{range: } [0, 1] introduced in [3] The support metric is defined for itemsets, not assocication rules. The table produced by the association rule mining algorithm contains three different support metrics: 'antecedent support', 'consequent support', and 'support'. Here, 'antecedent support' computes the proportion of transactions that contain the antecedent A, and 'consequent support' computes the support for the itemset of the consequent C. The 'support' metric then computes the support of the combined itemset A \\cup C -- note that 'support' depends on 'antecedent support' and 'consequent support' via min('antecedent support', 'consequent support'). Typically, support is used to measure the abundance or frequency (often interpreted as significance or importance) of an itemset in a database. We refer to an itemset as a \"frequent itemset\" if you support is larger than a specified minimum-support threshold. Note that in general, due to the downward closure property, all subsets of a frequent itemset are also frequent.","title":"'support':"},{"location":"user_guide/frequent_patterns/association_rules/#confidence","text":"\\text{confidence}(A\\rightarrow C) = \\frac{\\text{support}(A\\rightarrow C)}{\\text{support}(A)}, \\;\\;\\; \\text{range: } [0, 1] introduced in [3] The confidence of a rule A->C is the probability of seeing the consequent in a transaction given that it also contains the antecedent. Note that the metric is not symmetric or directed; for instance, the confidence for A->C is different than the confidence for C->A. The confidence is 1 (maximal) for a rule A->C if the consequent and antecedent always occur together.","title":"'confidence':"},{"location":"user_guide/frequent_patterns/association_rules/#lift","text":"\\text{lift}(A\\rightarrow C) = \\frac{\\text{confidence}(A\\rightarrow C)}{\\text{support}(C)}, \\;\\;\\; \\text{range: } [0, \\infty] introduced in [4] The lift metric is commonly used to measure how much more often the antecedent and consequent of a rule A->C occur together than we would expect if they were statistically independent. If A and C are independent, the Lift score will be exactly 1.","title":"'lift':"},{"location":"user_guide/frequent_patterns/association_rules/#leverage","text":"\\text{levarage}(A\\rightarrow C) = \\text{support}(A\\rightarrow C) - \\text{support}(A) \\times \\text{support}(C), \\;\\;\\; \\text{range: } [-1, 1] introduced in [5] Leverage computes the difference between the observed frequency of A and C appearing together and the frequency that would be expected if A and C were independent. An leverage value of 0 indicates independence.","title":"'leverage':"},{"location":"user_guide/frequent_patterns/association_rules/#conviction","text":"\\text{conviction}(A\\rightarrow C) = \\frac{1 - \\text{support}(C)}{1 - \\text{confidence}(A\\rightarrow C)}, \\;\\;\\; \\text{range: } [0, \\infty] introduced in [6] A high conviction value means that the consequent is highly depending on the antecedent. For instance, in the case of a perfect confidence score, the denominator becomes 0 (due to 1 - 1) for which the conviction score is defined as 'inf'. 
0.108696","title":"Example 3 -- Frequent Itemsets with Incomplete Antecedent and Consequent Information"},{"location":"user_guide/frequent_patterns/association_rules/#api","text":"association_rules(df, metric='confidence', min_threshold=0.8, support_only=False) Generates a DataFrame of association rules including the metrics 'score', 'confidence', and 'lift' Parameters df : pandas DataFrame pandas DataFrame of frequent itemsets with columns ['support', 'itemsets'] metric : string (default: 'confidence') Metric to evaluate if a rule is of interest. Automatically set to 'support' if support_only=True . Otherwise, supported metrics are 'support', 'confidence', 'lift', 'leverage', and 'conviction' These metrics are computed as follows: - support(A->C) = support(A+C) [aka 'support'], range: [0, 1] - confidence(A->C) = support(A+C) / support(A), range: [0, 1] - lift(A->C) = confidence(A->C) / support(C), range: [0, inf] - leverage(A->C) = support(A->C) - support(A)*support(C), range: [-1, 1] - conviction = [1 - support(C)] / [1 - confidence(A->C)], range: [0, inf] min_threshold : float (default: 0.8) Minimal threshold for the evaluation metric, via the metric parameter, to decide whether a candidate rule is of interest. support_only : bool (default: False) Only computes the rule support and fills the other metric columns with NaNs. This is useful if: a) the input DataFrame is incomplete, e.g., does not contain support values for all rule antecedents and consequents b) you simply want to speed up the computation because you don't need the other metrics. Returns pandas DataFrame with columns \"antecedents\" and \"consequents\" that store itemsets, plus the scoring metric columns: \"antecedent support\", \"consequent support\", \"support\", \"confidence\", \"lift\", \"leverage\", \"conviction\" of all rules for which metric(rule) >= min_threshold. Each entry in the \"antecedents\" and \"consequents\" columns are of type frozenset , which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/association_rules/","title":"API"},{"location":"user_guide/general_concepts/activation-functions/","text":"Activation Functions for Artificial Neural Networks","title":"Activation Functions for Artificial Neural Networks"},{"location":"user_guide/general_concepts/activation-functions/#activation-functions-for-artificial-neural-networks","text":"","title":"Activation Functions for Artificial Neural Networks"},{"location":"user_guide/general_concepts/gradient-optimization/","text":"Gradient Descent and Stochastic Gradient Descent Gradient Descent (GD) Optimization Using the Gradient Decent optimization algorithm, the weights are updated incrementally after each epoch (= pass over the training dataset). Compatible cost functions J(\\cdot) Sum of squared errors (SSE) [ mlxtend.regressor.LinearRegression , mlxtend.classfier.Adaline ]: J(\\mathbf{w}) = \\frac{1}{2} \\sum_i (\\text{target}^{(i)} - \\text{output}^{(i)})^2 Logistic Cost (cross-entropy) [ mlxtend.classfier.LogisticRegression ]: ... The magnitude and direction of the weight update is computed by taking a step in the opposite direction of the cost gradient \\Delta w_j = -\\eta \\frac{\\partial J}{\\partial w_j}, where \\eta is the learning rate. 
The weights are then updated after each epoch via the following update rule:

\mathbf{w} := \mathbf{w} + \Delta\mathbf{w},

where \Delta\mathbf{w} is a vector that contains the weight updates of each weight coefficient w, which are computed as follows:

\Delta w_j = -\eta \frac{\partial J}{\partial w_j} = -\eta \sum_i (\text{target}^{(i)} - \text{output}^{(i)})(-x_{j}^{(i)}) = \eta \sum_i (\text{target}^{(i)} - \text{output}^{(i)})x_{j}^{(i)}.

Essentially, we can picture Gradient Descent optimization as a hiker (the weight coefficient) who wants to climb down a mountain (cost function) into a valley (cost minimum), where each step is determined by the steepness of the slope (gradient) and the leg length of the hiker (learning rate). Considering a cost function with only a single weight coefficient, we can illustrate this concept as follows:

Stochastic Gradient Descent (SGD)

In Gradient Descent optimization, we compute the cost gradient based on the complete training set; hence, we sometimes also call it batch gradient descent. In case of very large datasets, using Gradient Descent can be quite costly since we are only taking a single step for one pass over the training set -- thus, the larger the training set, the slower our algorithm updates the weights and the longer it may take until it converges to the global cost minimum (note that the SSE cost function is convex).

In Stochastic Gradient Descent (sometimes also referred to as iterative or on-line gradient descent), we don't accumulate the weight updates as we've seen above for Gradient Descent:

- for one or more epochs:
  - for each weight j: w_j := w_j + \Delta w_j, where \Delta w_j = \eta \sum_i (\text{target}^{(i)} - \text{output}^{(i)})x_{j}^{(i)}

Instead, we update the weights after each training sample:

- for one or more epochs, or until approx. cost minimum is reached:
  - for training sample i:
    - for each weight j: w_j := w_j + \Delta w_j, where \Delta w_j = \eta (\text{target}^{(i)} - \text{output}^{(i)})x_{j}^{(i)}

Here, the term "stochastic" comes from the fact that the gradient based on a single training sample is a "stochastic approximation" of the "true" cost gradient. Due to its stochastic nature, the path towards the global cost minimum is not "direct" as in Gradient Descent, but may go "zig-zag" if we are visualizing the cost surface in a 2D space. However, it has been shown that Stochastic Gradient Descent almost surely converges to the global cost minimum if the cost function is convex (or pseudo-convex) [1].
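To illustrate the difference between the two update schemes, here is a minimal NumPy sketch (our illustration, not mlxtend code) of one epoch of batch gradient descent versus one epoch of SGD for the SSE cost of a linear model; the data setup is made up for the example:

import numpy as np

# Illustrative setup: 100 samples, bias column x_0 = 1 plus two features
rng = np.random.RandomState(0)
X = np.hstack([np.ones((100, 1)), rng.randn(100, 2)])
y = X @ np.array([1.0, 2.0, -0.5]) + 0.1 * rng.randn(100)
w = np.zeros(3)
eta = 0.01

# Batch GD: accumulate over ALL samples, then take a single step per epoch
errors = y - X @ w                  # target - output, for every sample
w = w + eta * (X.T @ errors)        # Delta w_j = eta * sum_i(error_i * x_ij)

# SGD: take a small step after EACH training sample
for x_i, y_i in zip(X, y):
    error = y_i - x_i @ w
    w = w + eta * error * x_i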
Stochastic Gradient Descent Shuffling

There are several different flavors of stochastic gradient descent, all of which can be seen throughout the literature. Let's take a look at the three most common variants:

A)
- randomly shuffle samples in the training set
- for one or more epochs, or until approx. cost minimum is reached:
  - for each training sample i: compute gradients and perform weight updates

B)
- for one or more epochs, or until approx. cost minimum is reached:
  - randomly shuffle samples in the training set
  - for each training sample i: compute gradients and perform weight updates

C)
- for iterations t, or until approx. cost minimum is reached:
  - draw a random sample from the training set
  - compute gradients and perform weight updates

In scenario A [3], we shuffle the training set only one time in the beginning, whereas in scenario B we shuffle the training set after each epoch to prevent repeating update cycles. In both scenario A and scenario B, each training sample is only used once per epoch to update the model weights. In scenario C, we draw the training samples randomly with replacement from the training set [2]. If the number of iterations t is equal to the number of training samples, we learn the model based on a bootstrap sample of the training set.

Mini-Batch Gradient Descent (MB-GD)

Mini-Batch Gradient Descent (MB-GD) is a compromise between batch GD and SGD. In MB-GD, we update the model based on smaller groups of training samples; instead of computing the gradient from 1 sample (SGD) or all n training samples (GD), we compute the gradient from 1 < k < n training samples (a common mini-batch size is k=50). MB-GD converges in fewer iterations than GD because we update the weights more frequently; however, MB-GD lets us utilize vectorized operations, which typically results in a computational performance gain over SGD.

Learning Rates

- An adaptive learning rate \eta: choosing a decrease constant d that shrinks the learning rate over time: \eta(t+1) := \eta(t) / (1 + t \times d)
- Momentum learning by adding a factor of the previous gradient to the weight update for faster updates: \Delta \mathbf{w}_{t+1} := \eta \nabla J(\mathbf{w}_{t+1}) + \alpha \Delta \mathbf{w}_{t}

References

[1] Bottou, Léon (1998). "Online Algorithms and Stochastic Approximations". Online Learning and Neural Networks. Cambridge University Press. ISBN 978-0-521-65263-6
[2] Bottou, Léon. "Large-scale machine learning with stochastic gradient descent." Proceedings of COMPSTAT'2010. Physica-Verlag HD, 2010. 177-186.
[3] Bottou, Léon. "Stochastic gradient descent tricks." Neural Networks: Tricks of the Trade. Springer Berlin Heidelberg, 2012. 421-436.
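The two learning-rate ideas can be sketched in a few lines of NumPy (our illustration only; the sign convention follows the \Delta w = -\eta \nabla J definition from above, and it reuses the X, y arrays from the earlier sketch):

import numpy as np

eta0, d, alpha, n_epochs = 0.1, 0.01, 0.9, 20   # illustrative hyperparameters
w = np.zeros(3)
delta_w_prev = np.zeros_like(w)                 # previous update, for the momentum term

for t in range(n_epochs):
    eta_t = eta0 / (1.0 + t * d)                # decrease constant d shrinks eta over time
    grad = -(X.T @ (y - X @ w))                 # gradient of the SSE cost, nabla J(w)
    delta_w = -eta_t * grad + alpha * delta_w_prev   # momentum: add alpha * previous update
    w = w + delta_w
    delta_w_prev = delta_w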
Deriving the Gradient Descent Rule for Linear Regression and Adaline

Linear Regression and Adaptive Linear Neurons (Adalines) are closely related to each other. In fact, the Adaline algorithm is identical to linear regression except for a threshold function \phi(\cdot)_T that converts the continuous output into a categorical class label

\phi(z)_T = \begin{cases} 1 & \text{if } z \geq 0 \\ 0 & \text{if } z < 0 \end{cases},

where z is the net input, which is computed as the sum of the input features \mathbf{x} multiplied by the model weights \mathbf{w}:

z = w_0x_0 + w_1x_1 + \dots + w_mx_m = \sum_{j=0}^{m} x_j w_j = \mathbf{w}^T \mathbf{x}

(Note that x_0 refers to the bias unit so that x_0=1.)

In the case of linear regression and Adaline, the activation function \phi(\cdot)_A is simply the identity function, so that \phi(z)_A = z.

Now, in order to learn the optimal model weights \mathbf{w}, we need to define a cost function that we can optimize. Here, our cost function J(\cdot) is the sum of squared errors (SSE), which we multiply by \frac{1}{2} to make the derivation easier:

J(\mathbf{w}) = \frac{1}{2} \sum_i \big(y^{(i)} - \phi(z)_{A}^{(i)}\big)^2,

where y^{(i)} is the target label of the i-th training point x^{(i)}. (Note that the SSE cost function is convex and therefore differentiable.)
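Expressed as code, the net input, the identity activation, and Adaline's threshold function might look as follows (a sketch of the definitions above, not mlxtend's internal implementation):

import numpy as np

def net_input(x, w):
    # z = w^T x; x is expected to include the bias unit x_0 = 1
    return np.dot(w, x)

def activation(z):
    # phi(z)_A = z: the identity function used during learning
    return z

def threshold(z):
    # phi(z)_T: converts the continuous output into a class label
    return np.where(z >= 0.0, 1, 0)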
In simple words, we can summarize the gradient descent learning as follows:

1. Initialize the weights to 0 or small random numbers.
2. For k epochs (passes over the training set):
   - For each training sample x^{(i)}:
     - Compute the predicted output value \hat{y}^{(i)}
     - Compare \hat{y}^{(i)} to the actual output y^{(i)} and compute the "weight update" value
     - Accumulate the "weight update" value
   - Update the weight coefficients by the accumulated "weight update" values.

Which we can translate into a more mathematical notation:

1. Initialize the weights to 0 or small random numbers.
2. For k epochs:
   - For each training sample x^{(i)}:
     - \phi(z^{(i)})_A = \hat{y}^{(i)}
     - \Delta w_{(t+1), \; j} = \eta (y^{(i)} - \hat{y}^{(i)}) x_{j}^{(i)} (where \eta is the learning rate)
     - \Delta w_{j} := \Delta w_j + \Delta w_{(t+1), \; j}
   - \mathbf{w} := \mathbf{w} + \Delta \mathbf{w}

Performing this global weight update

\mathbf{w} := \mathbf{w} + \Delta \mathbf{w}

can be understood as "updating the model weights by taking a step in the opposite direction of the cost gradient, scaled by the learning rate \eta":

\Delta \mathbf{w} = - \eta \nabla J(\mathbf{w}),

where the partial derivative with respect to each w_j can be written as

\frac{\partial J}{\partial w_j} = - \sum_i \big(y^{(i)} - \phi(z)_{A}^{(i)}\big) x_{j}^{(i)}.

To summarize: in order to use gradient descent to learn the model coefficients, we simply update the weights \mathbf{w} by taking a step into the opposite direction of the gradient for each pass over the training set -- that's basically it. But how do we get to the equation

\frac{\partial J}{\partial w_j} = - \sum_i \big(y^{(i)} - \phi(z)_{A}^{(i)}\big) x_{j}^{(i)}?

Let's walk through the derivation step by step.

\begin{aligned}
\frac{\partial J}{\partial w_j}
&= \frac{\partial}{\partial w_j} \frac{1}{2} \sum_i \big(y^{(i)} - \phi(z)_{A}^{(i)}\big)^2 \\
&= \frac{1}{2} \sum_i \frac{\partial}{\partial w_j} \big(y^{(i)} - \phi(z)_{A}^{(i)}\big)^2 \\
&= \frac{1}{2} \sum_i 2 \big(y^{(i)} - \phi(z)_{A}^{(i)}\big) \frac{\partial}{\partial w_j} \big(y^{(i)} - \phi(z)_{A}^{(i)}\big) \\
&= \sum_i \big(y^{(i)} - \phi(z)_{A}^{(i)}\big) \frac{\partial}{\partial w_j} \bigg(y^{(i)} - \sum_j w_{j} x_{j}^{(i)} \bigg) \\
&= \sum_i \big(y^{(i)} - \phi(z)_{A}^{(i)}\big)\big(-x_{j}^{(i)}\big) \\
&= - \sum_i \big(y^{(i)} - \phi(z)_{A}^{(i)}\big) x_{j}^{(i)}
\end{aligned}
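A quick way to convince yourself of the result is a numerical gradient check: compare the derived expression against centered finite differences of the cost (a standalone sketch with made-up data):

import numpy as np

rng = np.random.RandomState(1)
X = np.hstack([np.ones((20, 1)), rng.randn(20, 2)])   # x_0 = 1: bias unit
y = rng.randn(20)
w = rng.randn(3)

def cost(w):
    return 0.5 * np.sum((y - X @ w) ** 2)    # J(w) = 1/2 sum_i (y - phi(z))^2

analytic = -(X.T @ (y - X @ w))              # -sum_i (y - phi(z)) * x_j, as derived above

eps = 1e-6
numeric = np.array([
    (cost(w + eps * np.eye(3)[j]) - cost(w - eps * np.eye(3)[j])) / (2 * eps)
    for j in range(3)])

print(np.allclose(analytic, numeric, atol=1e-4))   # True -- the derivation checks out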
Regularization of Generalized Linear Models

Overview

We can understand regularization as an approach of adding an additional bias to a model to reduce the degree of overfitting in models that suffer from high variance. By adding regularization terms to the cost function, we penalize large model coefficients (weights); effectively, we are reducing the complexity of the model.
L2 regularization

In L2 regularization, we shrink the weights by penalizing the squared Euclidean norm of the weight coefficients (the weight vector \mathbf{w}); \lambda is the regularization parameter to be optimized.

L2: \lambda \; \lVert \mathbf{w} \rVert_2^2 = \lambda \sum_{j=1}^{m} w_j^2

For example, we can regularize the sum of squared errors cost function (SSE) as follows:

SSE = \sum^{n}_{i=1} \big(\text{target}^{(i)} - \text{output}^{(i)}\big)^2 + L2

Intuitively, we can think of regularization as an additional penalty term or constraint, as shown in the figure below. Without regularization, our objective is to find the global cost minimum. By adding a regularization penalty, our objective becomes to minimize the cost function under the constraint that we have to stay within our "budget" (the gray-shaded ball).

In addition, we can control the regularization strength via the regularization parameter \lambda. The larger the value of \lambda, the stronger the regularization of the model. The weight coefficients approach 0 as \lambda goes towards infinity.

L1 regularization

In L1 regularization, we shrink the weights using the absolute values of the weight coefficients (the weight vector \mathbf{w}); \lambda is the regularization parameter to be optimized.

L1: \lambda \; \lVert \mathbf{w} \rVert_1 = \lambda \sum_{j=1}^{m} |w_j|

For example, we can regularize the sum of squared errors cost function (SSE) as follows:

SSE = \sum^{n}_{i=1} \big(\text{target}^{(i)} - \text{output}^{(i)}\big)^2 + L1

At its core, L1 regularization is very similar to L2 regularization. However, instead of a quadratic penalty term as in L2, we penalize the model by the absolute weight coefficients. As we can see in the figure below, our "budget" has "sharp edges," which is the geometric interpretation of why the L1 model induces sparsity.

References

[1] M. Y. Park and T. Hastie. "L1-regularization path algorithm for generalized linear models". Journal of the Royal Statistical Society: Series B (Statistical Methodology), 69(4):659-677, 2007.
[2] A. Y. Ng. "Feature selection, L1 vs. L2 regularization, and rotational invariance". In Proceedings of the twenty-first international conference on Machine learning, page 78. ACM, 2004.
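In code, attaching the L2 penalty to the SSE cost and its gradient might look like the following sketch (our illustration; note that in practice the bias weight is often excluded from the penalty):

import numpy as np

lam = 0.1   # regularization strength lambda (illustrative value)

def cost_sse_l2(w, X, y):
    # SSE + L2 = sum_i (target - output)^2 + lambda * sum_j w_j^2
    return np.sum((y - X @ w) ** 2) + lam * np.sum(w ** 2)

def gradient_sse_l2(w, X, y):
    # the penalty contributes a 2 * lambda * w "weight decay" term
    return -2.0 * (X.T @ (y - X @ w)) + 2.0 * lam * w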
Extract Face Landmarks

A function to extract facial landmarks.

from mlxtend.image import extract_face_landmarks

Overview

The extract_face_landmarks function detects the faces in a given image and then returns the face landmark points (also known as the face shape) for the first face found in the image, based on dlib's face landmark detection code (http://dlib.net/face_landmark_detection_ex.cpp.html):

The face detector we use is made using the classic Histogram of Oriented Gradients (HOG) feature combined with a linear classifier, an image pyramid, and sliding window detection scheme. The pose estimator was created by using dlib's implementation of the paper: One Millisecond Face Alignment with an Ensemble of Regression Trees by Vahid Kazemi and Josephine Sullivan, CVPR 2014, and was trained on the iBUG 300-W face landmark dataset (see https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/): C. Sagonas, E. Antonakos, G. Tzimiropoulos, S. Zafeiriou, M. Pantic. 300 faces In-the-wild challenge: Database and results. Image and Vision Computing (IMAVIS), Special Issue on Facial Landmark Localisation "In-The-Wild". 2016.
You can get the trained model file from: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2. Note that the license for the iBUG 300-W dataset excludes commercial use. So you should contact Imperial College London to find out if it's OK for you to use this model file in a commercial product. References Kazemi, Vahid, and Josephine Sullivan. \"One millisecond face alignment with an ensemble of regression trees.\" Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2014. Example 1 import imageio import matplotlib.pyplot as plt from mlxtend.image import extract_face_landmarks img = imageio.imread('lena.png') landmarks = extract_face_landmarks(img) print(landmarks.shape) print('\\n\\nFirst 10 landmarks:\\n', landmarks[:10]) (68, 2) First 10 landmarks: [[206 266] [204 290] [205 314] [209 337] [220 357] [236 374] [253 387] [273 397] [290 398] [304 391]] Visualization of the landmarks: fig = plt.figure(figsize=(15, 5)) ax = fig.add_subplot(1, 3, 1) ax.imshow(img) ax = fig.add_subplot(1, 3, 2) ax.scatter(landmarks[:, 0], -landmarks[:, 1], alpha=0.8) ax = fig.add_subplot(1, 3, 3) img2 = img.copy() for p in landmarks: img2[p[1]-3:p[1]+3,p[0]-3:p[0]+3,:] = (255, 255, 255) ax.imshow(img2) plt.show() API extract_face_landmarks(img, return_dtype=np.int32) Function to extract face landmarks. Note that this function requires an installation of the Python version of the library \"dlib\": http://dlib.net Parameters img : array, shape = [h, w, ?] numpy array of a face image. Supported shapes are - 3D tensors with 1 or more color channels, for example, RGB: [h, w, 3] - 2D tensors without color channel, for example, Grayscale: [h, w] return_dtype: the return data-type of the array, default: np.int32. Returns landmarks : numpy.ndarray, shape = [68, 2] A numpy array, where each row contains the x-y coordinates of a landmark point. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/sources/image/extract_face_landmarks.ipynb","title":"Extract Face Landmarks"},{"location":"user_guide/image/extract_face_landmarks/#extract-face-landmarks","text":"A function to extract facial landmarks. from mlxtend.image import extract_face_landmarks","title":"Extract Face Landmarks"},{"location":"user_guide/image/extract_face_landmarks/#overview","text":"The extract_face_landmarks function detects the faces in a given image and then returns the facial landmark points (also known as the face shape) for the first face found in the image, based on dlib's face landmark detection code (http://dlib.net/face_landmark_detection_ex.cpp.html): The face detector we use is made using the classic Histogram of Oriented Gradients (HOG) feature combined with a linear classifier, an image pyramid, and sliding window detection scheme. The pose estimator was created by using dlib's implementation of the paper: One Millisecond Face Alignment with an Ensemble of Regression Trees by Vahid Kazemi and Josephine Sullivan, CVPR 2014 and was trained on the iBUG 300-W face landmark dataset (see https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/): C. Sagonas, E. Antonakos, G. Tzimiropoulos, S. Zafeiriou, M. Pantic. 300 faces In-the-wild challenge: Database and results. Image and Vision Computing (IMAVIS), Special Issue on Facial Landmark Localisation \"In-The-Wild\". 2016. You can get the trained model file from: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2. Note that the license for the iBUG 300-W dataset excludes commercial use.
So you should contact Imperial College London to find out if it's OK for you to use this model file in a commercial product.","title":"Overview"},{"location":"user_guide/image/extract_face_landmarks/#references","text":"Kazemi, Vahid, and Josephine Sullivan. \"One millisecond face alignment with an ensemble of regression trees.\" Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2014.","title":"References"},{"location":"user_guide/image/extract_face_landmarks/#example-1","text":"import imageio import matplotlib.pyplot as plt from mlxtend.image import extract_face_landmarks img = imageio.imread('lena.png') landmarks = extract_face_landmarks(img) print(landmarks.shape) print('\\n\\nFirst 10 landmarks:\\n', landmarks[:10]) (68, 2) First 10 landmarks: [[206 266] [204 290] [205 314] [209 337] [220 357] [236 374] [253 387] [273 397] [290 398] [304 391]] Visualization of the landmarks: fig = plt.figure(figsize=(15, 5)) ax = fig.add_subplot(1, 3, 1) ax.imshow(img) ax = fig.add_subplot(1, 3, 2) ax.scatter(landmarks[:, 0], -landmarks[:, 1], alpha=0.8) ax = fig.add_subplot(1, 3, 3) img2 = img.copy() for p in landmarks: img2[p[1]-3:p[1]+3,p[0]-3:p[0]+3,:] = (255, 255, 255) ax.imshow(img2) plt.show()","title":"Example 1"},{"location":"user_guide/image/extract_face_landmarks/#api","text":"extract_face_landmarks(img, return_dtype=np.int32) Function to extract face landmarks. Note that this function requires an installation of the Python version of the library \"dlib\": http://dlib.net Parameters img : array, shape = [h, w, ?] numpy array of a face image. Supported shapes are - 3D tensors with 1 or more color channels, for example, RGB: [h, w, 3] - 2D tensors without color channel, for example, Grayscale: [h, w] return_dtype: the return data-type of the array, default: np.int32. Returns landmarks : numpy.ndarray, shape = [68, 2] A numpy array, where each row contains the x-y coordinates of a landmark point. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/sources/image/extract_face_landmarks.ipynb","title":"API"},{"location":"user_guide/math/num_combinations/","text":"Compute the Number of Combinations A function to calculate the number of combinations for creating subsequences of k elements out of a sequence with n elements. from mlxtend.math import num_combinations Overview Combinations are selections of items from a collection regardless of the order in which they appear (in contrast to permutations). For example, let's consider a combination of 3 elements (k=3) from a collection of 5 elements (n=5): collection: {1, 2, 3, 4, 5} combination 1a: {1, 3, 5} combination 1b: {1, 5, 3} combination 1c: {3, 5, 1} ... combination 2: {1, 3, 4} In the example above, combinations 1a, 1b, and 1c are the \"same combination\" and counted as \"1 possible way to combine items 1, 3, and 5\" -- in combinations, the order does not matter.
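Because the order does not matter, itertools.combinations from the standard library enumerates each unordered selection exactly once, mirroring the counting convention just described (an illustrative aside, independent of mlxtend):

import itertools

# (1, 3, 5) appears once; its reorderings (1, 5, 3) and (3, 5, 1) do not
sel = list(itertools.combinations([1, 2, 3, 4, 5], 3))
print((1, 3, 5) in sel)  # True
print(len(sel))          # 10, i.e., "5 choose 3"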
The number of ways to combine elements ( without replacement ) from a collection with size n into subsets of size k is computed via the binomial coefficient (\" n choose k \"): \\begin{pmatrix} n \\\\ k \\end{pmatrix} = \\frac{n(n-1)\\ldots(n-k+1)}{k(k-1)\\dots1} = \\frac{n!}{k!(n-k)!} To compute the number of combinations with replacement , the following alternative equation is used (\" n multichoose k \"): \\left(\\!\\!\\begin{pmatrix} n \\\\ k \\end{pmatrix}\\!\\!\\right) = \\begin{pmatrix} n + k -1 \\\\ k \\end{pmatrix} References https://en.wikipedia.org/wiki/Combination Example 1 - Compute the number of combinations from mlxtend.math import num_combinations c = num_combinations(n=20, k=8, with_replacement=False) print('Number of ways to combine 20 elements' ' into 8 subelements: %d' % c) Number of ways to combine 20 elements into 8 subelements: 125970 from mlxtend.math import num_combinations c = num_combinations(n=20, k=8, with_replacement=True) print('Number of ways to combine 20 elements' ' into 8 subelements (with replacement): %d' % c) Number of ways to combine 20 elements into 8 subelements (with replacement): 2220075 Example 2 - A progress tracking use-case It is often quite useful to track the progress of a computationally expensive task to estimate its runtime. Here, the num_combinations function can be used to compute the maximum number of loops of a combinations iterable from itertools: import itertools import sys import time from mlxtend.math import num_combinations items = {1, 2, 3, 4, 5, 6, 7, 8} max_iter = num_combinations(n=len(items), k=3, with_replacement=False) for idx, i in enumerate(itertools.combinations(items, r=3)): # do some computation with itemset i time.sleep(0.1) sys.stdout.write('\\rProgress: %d/%d' % (idx + 1, max_iter)) sys.stdout.flush() Progress: 56/56 API num_combinations(n, k, with_replacement=False) Function to calculate the number of possible combinations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool (default: False) Allows repeated elements if True. Returns comb : int Number of possible combinations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_combinations/","title":"Compute the Number of Combinations"},{"location":"user_guide/math/num_combinations/#compute-the-number-of-combinations","text":"A function to calculate the number of combinations for creating subsequences of k elements out of a sequence with n elements. from mlxtend.math import num_combinations","title":"Compute the Number of Combinations"},{"location":"user_guide/math/num_combinations/#overview","text":"Combinations are selections of items from a collection regardless of the order in which they appear (in contrast to permutations). For example, let's consider a combination of 3 elements (k=3) from a collection of 5 elements (n=5): collection: {1, 2, 3, 4, 5} combination 1a: {1, 3, 5} combination 1b: {1, 5, 3} combination 1c: {3, 5, 1} ... combination 2: {1, 3, 4} In the example above, combinations 1a, 1b, and 1c are the \"same combination\" and counted as \"1 possible way to combine items 1, 3, and 5\" -- in combinations, the order does not matter.
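The two counts reported in Example 1 above can be cross-checked with plain factorial arithmetic from the standard library (an independent sanity check, not part of mlxtend):

import math

n, k = 20, 8
# without replacement: "n choose k"
print(math.factorial(n) // (math.factorial(k) * math.factorial(n - k)))          # 125970
# with replacement: "(n + k - 1) choose k"
print(math.factorial(n + k - 1) // (math.factorial(k) * math.factorial(n - 1)))  # 2220075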
The number of ways to combine elements ( without replacement ) from a collection with size n into subsets of size k is computed via the binomial coefficient (\" n choose k \"): \\begin{pmatrix} n \\\\ k \\end{pmatrix} = \\frac{n(n-1)\\ldots(n-k+1)}{k(k-1)\\dots1} = \\frac{n!}{k!(n-k)!} To compute the number of combinations with replacement , the following alternative equation is used (\" n multichoose k \"): \\left(\\!\\!\\begin{pmatrix} n \\\\ k \\end{pmatrix}\\!\\!\\right) = \\begin{pmatrix} n + k -1 \\\\ k \\end{pmatrix}","title":"Overview"},{"location":"user_guide/math/num_combinations/#references","text":"https://en.wikipedia.org/wiki/Combination","title":"References"},{"location":"user_guide/math/num_combinations/#example-1-compute-the-number-of-combinations","text":"from mlxtend.math import num_combinations c = num_combinations(n=20, k=8, with_replacement=False) print('Number of ways to combine 20 elements' ' into 8 subelements: %d' % c) Number of ways to combine 20 elements into 8 subelements: 125970 from mlxtend.math import num_combinations c = num_combinations(n=20, k=8, with_replacement=True) print('Number of ways to combine 20 elements' ' into 8 subelements (with replacement): %d' % c) Number of ways to combine 20 elements into 8 subelements (with replacement): 2220075","title":"Example 1 - Compute the number of combinations"},{"location":"user_guide/math/num_combinations/#example-2-a-progress-tracking-use-case","text":"It is often quite useful to track the progress of a computationally expensive task to estimate its runtime. Here, the num_combinations function can be used to compute the maximum number of loops of a combinations iterable from itertools: import itertools import sys import time from mlxtend.math import num_combinations items = {1, 2, 3, 4, 5, 6, 7, 8} max_iter = num_combinations(n=len(items), k=3, with_replacement=False) for idx, i in enumerate(itertools.combinations(items, r=3)): # do some computation with itemset i time.sleep(0.1) sys.stdout.write('\\rProgress: %d/%d' % (idx + 1, max_iter)) sys.stdout.flush() Progress: 56/56","title":"Example 2 - A progress tracking use-case"},{"location":"user_guide/math/num_combinations/#api","text":"num_combinations(n, k, with_replacement=False) Function to calculate the number of possible combinations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool (default: False) Allows repeated elements if True. Returns comb : int Number of possible combinations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_combinations/","title":"API"},{"location":"user_guide/math/num_permutations/","text":"Compute the Number of Permutations A function to calculate the number of permutations for creating subsequences of k elements out of a sequence with n elements. from mlxtend.math import num_permutations Overview Permutations are selections of items from a collection with regard to the order in which they appear (in contrast to combinations). For example, let's consider a permutation of 3 elements (k=3) from a collection of 5 elements (n=5): collection: {1, 2, 3, 4, 5} permutation 1a: {1, 3, 5} permutation 1b: {1, 5, 3} permutation 1c: {3, 5, 1} ... permutation 2: {1, 3, 4} In the example above, permutations 1a, 1b, and 1c are the \"same combination\" but distinct permutations -- in combinations, the order does not matter, but in permutations it does.
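The permutation counts computed in Example 1 further below can be previewed with the same kind of factorial arithmetic (standard library only; an illustrative cross-check):

import math

n, k = 20, 8
# without replacement: n! / (n - k)!
print(math.factorial(n) // math.factorial(n - k))  # 5079110400
# with replacement: n ** k
print(n ** k)                                      # 25600000000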
The number of ways to arrange k elements out of a collection of size n in order ( without replacement ) is the binomial coefficient (\" n choose k \") multiplied by k! : k!\\begin{pmatrix} n \\\\ k \\end{pmatrix} = k! \\cdot \\frac{n!}{k!(n-k)!} = \\frac{n!}{(n-k)!} To compute the number of permutations with replacement , we simply need to compute n^k . References https://en.wikipedia.org/wiki/Permutation Example 1 - Compute the number of permutations from mlxtend.math import num_permutations c = num_permutations(n=20, k=8, with_replacement=False) print('Number of ways to permute 20 elements' ' into 8 subelements: %d' % c) Number of ways to permute 20 elements into 8 subelements: 5079110400 from mlxtend.math import num_permutations c = num_permutations(n=20, k=8, with_replacement=True) print('Number of ways to permute 20 elements' ' into 8 subelements (with replacement): %d' % c) Number of ways to permute 20 elements into 8 subelements (with replacement): 25600000000 Example 2 - A progress tracking use-case It is often quite useful to track the progress of a computationally expensive task to estimate its runtime. Here, the num_permutations function can be used to compute the maximum number of loops of a permutations iterable from itertools: import itertools import sys import time from mlxtend.math import num_permutations items = {1, 2, 3, 4, 5, 6, 7, 8} max_iter = num_permutations(n=len(items), k=3, with_replacement=False) for idx, i in enumerate(itertools.permutations(items, r=3)): # do some computation with itemset i time.sleep(0.01) sys.stdout.write('\\rProgress: %d/%d' % (idx + 1, max_iter)) sys.stdout.flush() Progress: 336/336 API num_permutations(n, k, with_replacement=False) Function to calculate the number of possible permutations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool (default: False) Allows repeated elements if True. Returns permut : int Number of possible permutations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_permutations/","title":"Compute the Number of Permutations"},{"location":"user_guide/math/num_permutations/#compute-the-number-of-permutations","text":"A function to calculate the number of permutations for creating subsequences of k elements out of a sequence with n elements. from mlxtend.math import num_permutations","title":"Compute the Number of Permutations"},{"location":"user_guide/math/num_permutations/#overview","text":"Permutations are selections of items from a collection with regard to the order in which they appear (in contrast to combinations). For example, let's consider a permutation of 3 elements (k=3) from a collection of 5 elements (n=5): collection: {1, 2, 3, 4, 5} permutation 1a: {1, 3, 5} permutation 1b: {1, 5, 3} permutation 1c: {3, 5, 1} ... permutation 2: {1, 3, 4} In the example above, permutations 1a, 1b, and 1c are the \"same combination\" but distinct permutations -- in combinations, the order does not matter, but in permutations it does. The number of ways to arrange k elements out of a collection of size n in order ( without replacement ) is the binomial coefficient (\" n choose k \") multiplied by k! : k!\\begin{pmatrix} n \\\\ k \\end{pmatrix} = k!
\\cdot \\frac{n!}{k!(n-k)!} = \\frac{n!}{(n-k)!} To compute the number of permutations with replacement , we simply need to compute n^k .","title":"Overview"},{"location":"user_guide/math/num_permutations/#references","text":"https://en.wikipedia.org/wiki/Permutation","title":"References"},{"location":"user_guide/math/num_permutations/#example-1-compute-the-number-of-permutations","text":"from mlxtend.math import num_permutations c = num_permutations(n=20, k=8, with_replacement=False) print('Number of ways to permute 20 elements' ' into 8 subelements: %d' % c) Number of ways to permute 20 elements into 8 subelements: 5079110400 from mlxtend.math import num_permutations c = num_permutations(n=20, k=8, with_replacement=True) print('Number of ways to permute 20 elements' ' into 8 subelements (with replacement): %d' % c) Number of ways to permute 20 elements into 8 subelements (with replacement): 25600000000","title":"Example 1 - Compute the number of permutations"},{"location":"user_guide/math/num_permutations/#example-2-a-progress-tracking-use-case","text":"It is often quite useful to track the progress of a computationally expensive task to estimate its runtime. Here, the num_permutations function can be used to compute the maximum number of loops of a permutations iterable from itertools: import itertools import sys import time from mlxtend.math import num_permutations items = {1, 2, 3, 4, 5, 6, 7, 8} max_iter = num_permutations(n=len(items), k=3, with_replacement=False) for idx, i in enumerate(itertools.permutations(items, r=3)): # do some computation with itemset i time.sleep(0.01) sys.stdout.write('\\rProgress: %d/%d' % (idx + 1, max_iter)) sys.stdout.flush() Progress: 336/336","title":"Example 2 - A progress tracking use-case"},{"location":"user_guide/math/num_permutations/#api","text":"num_permutations(n, k, with_replacement=False) Function to calculate the number of possible permutations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool (default: False) Allows repeated elements if True. Returns permut : int Number of possible permutations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_permutations/","title":"API"},{"location":"user_guide/math/vectorspace_dimensionality/","text":"Vectorspace Dimensionality A function to compute the number of dimensions a set of vectors (arranged as columns in a matrix) spans. from mlxtend.math import vectorspace_dimensionality Overview Given a set of vectors, arranged as columns in a matrix, the vectorspace_dimensionality computes the number of dimensions (i.e., hyper-volume) that the vectorspace spans using the Gram-Schmidt process [1]. In particular, since the Gram-Schmidt process yields vectors that are zero or normalized to 1 (i.e., an orthonormal vectorset if the input was a set of linearly independent vectors), the sum of the vector norms corresponds to the number of dimensions of a vectorset.","title":"Overview"},{"location":"user_guide/math/vectorspace_dimensionality/#references","text":"[1] https://en.wikipedia.org/wiki/Gram\u2013Schmidt_process","title":"References"},{"location":"user_guide/math/vectorspace_dimensionality/#example-1-compute-the-dimensions-of-a-vectorspace","text":"Let's assume we have the two basis vectors x=[1 \\;\\;\\; 0]^T and y=[0\\;\\;\\; 1]^T as columns in a matrix.
Due to the linear independence of the two vectors, the space that they span is naturally a plane (2D space): import numpy as np from mlxtend.math import vectorspace_dimensionality a = np.array([[1, 0], [0, 1]]) vectorspace_dimensionality(a) 2 However, if one vector is a linear combination of the other, it's intuitive to see that the space the vectorset describes is merely a line, i.e., a 1D space: b = np.array([[1, 2], [0, 0]]) vectorspace_dimensionality(b) 1 If 3 vectors are all linearly independent of each other, the dimensionality of the vector space is a volume (i.e., a 3D space): d = np.array([[1, 9, 1], [3, 2, 2], [5, 4, 3]]) vectorspace_dimensionality(d) 3 Again, if a pair of vectors is linearly dependent (here: the 1st and the 2nd column), this reduces the dimensionality by 1: c = np.array([[1, 2, 1], [3, 6, 2], [5, 10, 3]]) vectorspace_dimensionality(c) 2 API vectorspace_dimensionality(ary) Computes the hyper-volume spanned by a vector set Parameters ary : array-like, shape=[num_vectors, num_vectors] A set of vectors (arranged as columns in a matrix) Returns dimensions : int An integer indicating the \"dimensionality\" hyper-volume spanned by the vector set","title":"Vectorspace Dimensionality"},{"location":"user_guide/math/vectorspace_dimensionality/#vectorspace-dimensionality","text":"A function to compute the number of dimensions a set of vectors (arranged as columns in a matrix) spans. from mlxtend.math import vectorspace_dimensionality","title":"Vectorspace Dimensionality"},{"location":"user_guide/math/vectorspace_dimensionality/#overview","text":"Given a set of vectors, arranged as columns in a matrix, the vectorspace_dimensionality computes the number of dimensions (i.e., hyper-volume) that the vectorspace spans using the Gram-Schmidt process [1]. In particular, since the Gram-Schmidt process yields vectors that are zero or normalized to 1 (i.e., an orthonormal vectorset if the input was a set of linearly independent vectors), the sum of the vector norms corresponds to the number of dimensions of a vectorset.","title":"Overview"},{"location":"user_guide/math/vectorspace_dimensionality/#references","text":"[1] https://en.wikipedia.org/wiki/Gram\u2013Schmidt_process","title":"References"},{"location":"user_guide/math/vectorspace_dimensionality/#example-1-compute-the-dimensions-of-a-vectorspace","text":"Let's assume we have the two basis vectors x=[1 \\;\\;\\; 0]^T and y=[0\\;\\;\\; 1]^T as columns in a matrix.
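As an aside before the example text continues: the dimensionalities computed above can be cross-checked with numpy.linalg.matrix_rank, which returns the same quantity for vectors arranged as matrix columns (an independent check, not part of mlxtend):

import numpy as np

a = np.array([[1, 0], [0, 1]])  # linearly independent columns
b = np.array([[1, 2], [0, 0]])  # second column is 2x the first column
print(np.linalg.matrix_rank(a))  # 2
print(np.linalg.matrix_rank(b))  # 1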
Due to the linear independence of the two vectors, the space that they span is naturally a plane (2D space): import numpy as np from mlxtend.math import vectorspace_dimensionality a = np.array([[1, 0], [0, 1]]) vectorspace_dimensionality(a) 2 However, if one vector is a linear combination of the other, it's intuitive to see that the space the vectorset describes is merely a line, i.e., a 1D space: b = np.array([[1, 2], [0, 0]]) vectorspace_dimensionality(b) 1 If 3 vectors are all linearly independent of each other, the dimensionality of the vector space is a volume (i.e., a 3D space): d = np.array([[1, 9, 1], [3, 2, 2], [5, 4, 3]]) vectorspace_dimensionality(d) 3 Again, if a pair of vectors is linearly dependent (here: the 1st and the 2nd column), this reduces the dimensionality by 1: c = np.array([[1, 2, 1], [3, 6, 2], [5, 10, 3]]) vectorspace_dimensionality(c) 2","title":"Example 1 - Compute the dimensions of a vectorspace"},{"location":"user_guide/math/vectorspace_dimensionality/#api","text":"vectorspace_dimensionality(ary) Computes the hyper-volume spanned by a vector set Parameters ary : array-like, shape=[num_vectors, num_vectors] A set of vectors (arranged as columns in a matrix) Returns dimensions : int An integer indicating the \"dimensionality\" hyper-volume spanned by the vector set","title":"API"},{"location":"user_guide/math/vectorspace_orthonormalization/","text":"Vectorspace Orthonormalization A function that converts a set of linearly independent vectors to a set of orthonormal basis vectors. from mlxtend.math import vectorspace_orthonormalization Overview The vectorspace_orthonormalization converts a set of linearly independent vectors to a set of orthonormal basis vectors using the Gram-Schmidt process [1]. References [1] https://en.wikipedia.org/wiki/Gram\u2013Schmidt_process Example 1 - Convert a set of vectors to an orthonormal basis Note that to convert a set of linearly independent vectors into a set of orthonormal basis vectors, the vectorspace_orthonormalization function expects the vectors to be arranged as columns of a matrix (here: NumPy array). Please keep in mind that the vectorspace_orthonormalization function also works for linearly dependent vector sets; however, the resulting vectors won't form an orthonormal basis in that case. An easy way to check whether all vectors in the input set are linearly independent is to use the numpy.linalg.det (determinant) function. import numpy as np from mlxtend.math import vectorspace_orthonormalization a = np.array([[2, 0, 4, 12], [0, 2, 16, 4], [4, 16, 6, 2], [2, -12, 4, 6]]) s = '' if np.linalg.det(a) == 0.0: s = ' not' print('Input vectors are%s linearly independent' % s) vectorspace_orthonormalization(a) Input vectors are linearly independent array([[ 0.40824829, -0.1814885 , 0.04982278, 0.89325973], [ 0. , 0.1088931 , 0.99349591, -0.03328918], [ 0.81649658, 0.50816781, -0.06462163, -0.26631346], [ 0.40824829, -0.83484711, 0.07942048, -0.36063281]]) Note that scaling the inputs equally by a factor should leave the results unchanged: vectorspace_orthonormalization(a/2) array([[ 0.40824829, -0.1814885 , 0.04982278, 0.89325973], [ 0. , 0.1088931 , 0.99349591, -0.03328918], [ 0.81649658, 0.50816781, -0.06462163, -0.26631346], [ 0.40824829, -0.83484711, 0.07942048, -0.36063281]]) However, in case of linear dependence (the second column is a linear combination of the first column in the example below), the vector elements of one of the dependent vectors will become zero.
(For a pair of linearly dependent vectors, the one with the larger column index will be the one that's zeroed.) a[:, 1] = a[:, 0] * 2 vectorspace_orthonormalization(a) array([[ 0.40824829, 0. , 0.04155858, 0.82364839], [ 0. , 0. , 0.99740596, -0.06501108], [ 0.81649658, 0. , -0.04155858, -0.52008861], [ 0.40824829, 0. , 0.04155858, 0.21652883]]) API vectorspace_orthonormalization(ary, eps=1e-13) Transforms a set of column vectors to an orthonormal basis. Given a set of linearly independent vectors, this function converts such column vectors, arranged in a matrix, into orthonormal basis vectors. Parameters ary : array-like, shape=[num_vectors, num_vectors] A set of vectors (arranged as columns in a matrix) eps : float (default: 1e-13) A small tolerance value to determine whether the vector norm is zero or not. Returns arr : array-like, shape=[num_vectors, num_vectors] An orthonormal set of vectors (arranged as columns)","title":"Vectorspace Orthonormalization"},{"location":"user_guide/math/vectorspace_orthonormalization/#vectorspace-orthonormalization","text":"A function that converts a set of linearly independent vectors to a set of orthonormal basis vectors. from mlxtend.math import vectorspace_orthonormalization","title":"Vectorspace Orthonormalization"},{"location":"user_guide/math/vectorspace_orthonormalization/#overview","text":"The vectorspace_orthonormalization converts a set of linearly independent vectors to a set of orthonormal basis vectors using the Gram-Schmidt process [1].","title":"Overview"},{"location":"user_guide/math/vectorspace_orthonormalization/#references","text":"[1] https://en.wikipedia.org/wiki/Gram\u2013Schmidt_process","title":"References"},{"location":"user_guide/math/vectorspace_orthonormalization/#example-1-convert-a-set-of-vector-to-an-orthonormal-basis","text":"Note that to convert a set of linearly independent vectors into a set of orthonormal basis vectors, the vectorspace_orthonormalization function expects the vectors to be arranged as columns of a matrix (here: NumPy array). Please keep in mind that the vectorspace_orthonormalization function also works for linearly dependent vector sets; however, the resulting vectors won't form an orthonormal basis in that case. An easy way to check whether all vectors in the input set are linearly independent is to use the numpy.linalg.det (determinant) function. import numpy as np from mlxtend.math import vectorspace_orthonormalization a = np.array([[2, 0, 4, 12], [0, 2, 16, 4], [4, 16, 6, 2], [2, -12, 4, 6]]) s = '' if np.linalg.det(a) == 0.0: s = ' not' print('Input vectors are%s linearly independent' % s) vectorspace_orthonormalization(a) Input vectors are linearly independent array([[ 0.40824829, -0.1814885 , 0.04982278, 0.89325973], [ 0. , 0.1088931 , 0.99349591, -0.03328918], [ 0.81649658, 0.50816781, -0.06462163, -0.26631346], [ 0.40824829, -0.83484711, 0.07942048, -0.36063281]]) Note that scaling the inputs equally by a factor should leave the results unchanged: vectorspace_orthonormalization(a/2) array([[ 0.40824829, -0.1814885 , 0.04982278, 0.89325973], [ 0. , 0.1088931 , 0.99349591, -0.03328918], [ 0.81649658, 0.50816781, -0.06462163, -0.26631346], [ 0.40824829, -0.83484711, 0.07942048, -0.36063281]]) However, in case of linear dependence (the second column is a linear combination of the first column in the example below), the vector elements of one of the dependent vectors will become zero. (For a pair of linearly dependent vectors, the one with the larger column index will be the one that's zeroed.)
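As an independent sanity check before the linear-dependence demo that follows: the Q factor of numpy.linalg.qr spans the same column space, so its columns should agree with the vectorspace_orthonormalization output up to sign (numpy.linalg.qr is not part of mlxtend and is shown only for illustration):

import numpy as np

a = np.array([[2., 0., 4., 12.],
              [0., 2., 16., 4.],
              [4., 16., 6., 2.],
              [2., -12., 4., 6.]])
q, _ = np.linalg.qr(a)
# Q is orthonormal: Q^T Q equals the identity matrix up to floating-point error
print(np.allclose(q.T @ q, np.eye(4)))  # True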
a[:, 1] = a[:, 0] * 2 vectorspace_orthonormalization(a) array([[ 0.40824829, 0. , 0.04155858, 0.82364839], [ 0. , 0. , 0.99740596, -0.06501108], [ 0.81649658, 0. , -0.04155858, -0.52008861], [ 0.40824829, 0. , 0.04155858, 0.21652883]])","title":"Example 1 - Convert a set of vectors to an orthonormal basis"},{"location":"user_guide/math/vectorspace_orthonormalization/#api","text":"vectorspace_orthonormalization(ary, eps=1e-13) Transforms a set of column vectors to an orthonormal basis. Given a set of linearly independent vectors, this function converts such column vectors, arranged in a matrix, into orthonormal basis vectors. Parameters ary : array-like, shape=[num_vectors, num_vectors] A set of vectors (arranged as columns in a matrix) eps : float (default: 1e-13) A small tolerance value to determine whether the vector norm is zero or not. Returns arr : array-like, shape=[num_vectors, num_vectors] An orthonormal set of vectors (arranged as columns)","title":"API"},{"location":"user_guide/plotting/category_scatter/","text":"Scatterplot with Categories A function to quickly produce a scatter plot colored by categories from a pandas DataFrame or NumPy ndarray object. from mlxtend.plotting import category_scatter Overview References - Example 1 - Category Scatter from Pandas DataFrames import pandas as pd from io import StringIO csvfile = \"\"\"label,x,y class1,10.0,8.04 class1,10.5,7.30 class2,8.3,5.5 class2,8.1,5.9 class3,3.5,3.5 class3,3.8,5.1\"\"\" df = pd.read_csv(StringIO(csvfile)) df label x y 0 class1 10.0 8.04 1 class1 10.5 7.30 2 class2 8.3 5.50 3 class2 8.1 5.90 4 class3 3.5 3.50 5 class3 3.8 5.10 Plotting the data where the categories are determined by the unique values in the label column label_col . The x and y values are simply the column names of the DataFrame that we want to plot. import matplotlib.pyplot as plt from mlxtend.plotting import category_scatter fig = category_scatter(x='x', y='y', label_col='label', data=df, legend_loc='upper left') Example 2 - Category Scatter from NumPy Arrays import numpy as np from io import BytesIO csvfile = \"\"\"1,10.0,8.04 1,10.5,7.30 2,8.3,5.5 2,8.1,5.9 3,3.5,3.5 3,3.8,5.1\"\"\" ary = np.genfromtxt(BytesIO(csvfile.encode()), delimiter=',') ary array([[ 1. , 10. , 8.04], [ 1. , 10.5 , 7.3 ], [ 2. , 8.3 , 5.5 ], [ 2. , 8.1 , 5.9 ], [ 3. , 3.5 , 3.5 ], [ 3. , 3.8 , 5.1 ]]) Now, we pretend that the first column represents the labels, and the second and third columns represent the x and y values, respectively. import matplotlib.pyplot as plt from mlxtend.plotting import category_scatter fig = category_scatter(x=1, y=2, label_col=0, data=ary, legend_loc='upper left') API category_scatter(x, y, label_col, data, markers='sxo^v', colors=('blue', 'green', 'red', 'purple', 'gray', 'cyan'), alpha=0.7, markersize=20.0, legend_loc='best') Scatter plot to plot categories in different colors/markerstyles. Parameters x : str or int DataFrame column name of the x-axis values or integer for the numpy ndarray column index. y : str or int DataFrame column name of the y-axis values or integer for the numpy ndarray column index. data : Pandas DataFrame object or NumPy ndarray. markers : str Markers that are cycled through the label category. colors : tuple Colors that are cycled through the label category. alpha : float (default: 0.7) Parameter to control the transparency. markersize : float (default: 20.0) Parameter to control the marker size.
legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/category_scatter/","title":"Scatterplot with Categories"},{"location":"user_guide/plotting/category_scatter/#scatterplot-with-categories","text":"A function to quickly produce a scatter plot colored by categories from a pandas DataFrame or NumPy ndarray object. from mlxtend.plotting import category_scatter","title":"Scatterplot with Categories"},{"location":"user_guide/plotting/category_scatter/#overview","text":"","title":"Overview"},{"location":"user_guide/plotting/category_scatter/#references","text":"-","title":"References"},{"location":"user_guide/plotting/category_scatter/#example-1-category-scatter-from-pandas-dataframes","text":"import pandas as pd from io import StringIO csvfile = \"\"\"label,x,y class1,10.0,8.04 class1,10.5,7.30 class2,8.3,5.5 class2,8.1,5.9 class3,3.5,3.5 class3,3.8,5.1\"\"\" df = pd.read_csv(StringIO(csvfile)) df label x y 0 class1 10.0 8.04 1 class1 10.5 7.30 2 class2 8.3 5.50 3 class2 8.1 5.90 4 class3 3.5 3.50 5 class3 3.8 5.10 Plotting the data where the categories are determined by the unique values in the label column label_col . The x and y values are simply the column names of the DataFrame that we want to plot. import matplotlib.pyplot as plt from mlxtend.plotting import category_scatter fig = category_scatter(x='x', y='y', label_col='label', data=df, legend_loc='upper left')","title":"Example 1 - Category Scatter from Pandas DataFrames"},{"location":"user_guide/plotting/category_scatter/#example-2-category-scatter-from-numpy-arrays","text":"import numpy as np from io import BytesIO csvfile = \"\"\"1,10.0,8.04 1,10.5,7.30 2,8.3,5.5 2,8.1,5.9 3,3.5,3.5 3,3.8,5.1\"\"\" ary = np.genfromtxt(BytesIO(csvfile.encode()), delimiter=',') ary array([[ 1. , 10. , 8.04], [ 1. , 10.5 , 7.3 ], [ 2. , 8.3 , 5.5 ], [ 2. , 8.1 , 5.9 ], [ 3. , 3.5 , 3.5 ], [ 3. , 3.8 , 5.1 ]]) Now, we pretend that the first column represents the labels, and the second and third columns represent the x and y values, respectively. import matplotlib.pyplot as plt from mlxtend.plotting import category_scatter fig = category_scatter(x=1, y=2, label_col=0, data=ary, legend_loc='upper left')","title":"Example 2 - Category Scatter from NumPy Arrays"},{"location":"user_guide/plotting/category_scatter/#api","text":"category_scatter(x, y, label_col, data, markers='sxo^v', colors=('blue', 'green', 'red', 'purple', 'gray', 'cyan'), alpha=0.7, markersize=20.0, legend_loc='best') Scatter plot to plot categories in different colors/markerstyles. Parameters x : str or int DataFrame column name of the x-axis values or integer for the numpy ndarray column index. y : str or int DataFrame column name of the y-axis values or integer for the numpy ndarray column index. data : Pandas DataFrame object or NumPy ndarray. markers : str Markers that are cycled through the label category. colors : tuple Colors that are cycled through the label category. alpha : float (default: 0.7) Parameter to control the transparency. markersize : float (default: 20.0) Parameter to control the marker size.
legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/category_scatter/","title":"API"},{"location":"user_guide/plotting/checkerboard_plot/","text":"Checkerboard Plot Function to plot a checkerboard plot / heat map via matplotlib from mlxtend.plotting import checkerboard_plot Overview Function to plot a checkerboard plot / heat map via matplotlib. References - Example 1 - Default from mlxtend.plotting import checkerboard_plot import matplotlib.pyplot as plt import numpy as np ary = np.random.random((5, 4)) brd = checkerboard_plot(ary) plt.show() Example 2 - Changing colors and labels from mlxtend.plotting import checkerboard_plot import matplotlib.pyplot as plt import numpy as np checkerboard_plot(ary, col_labels=['abc', 'def', 'ghi', 'jkl'], row_labels=['sample %d' % i for i in range(1, 6)], cell_colors=['skyblue', 'whitesmoke'], font_colors=['black', 'black'], figsize=(4.5, 5)) plt.show() API checkerboard_plot(ary, cell_colors=('white', 'black'), font_colors=('black', 'white'), fmt='%.1f', figsize=None, row_labels=None, col_labels=None, fontsize=None) Plot a checkerboard table / heatmap via matplotlib. Parameters ary : array-like, shape = [n, m] A 2D NumPy array. cell_colors : tuple or list (default: ('white', 'black')) Tuple or list containing the two colors of the checkerboard pattern. font_colors : tuple or list (default: ('black', 'white')) Font colors corresponding to the cell colors. figsize : tuple (default: (2.5, 2.5)) Height and width of the figure fmt : str (default: '%.1f') Python string formatter for cell values. The default '%.1f' results in floats with 1 digit after the decimal point. Use '%d' to show numbers as integers. row_labels : list (default: None) List of the row labels. Uses the array row indices 0 to n by default. col_labels : list (default: None) List of the column labels. Uses the array column indices 0 to m by default. fontsize : int (default: None) Specifies the font size of the checkerboard table. Uses matplotlib's default if None. Returns fig : matplotlib Figure object.
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/checkerboard_plot/","title":"Checkerboard Plot"},{"location":"user_guide/plotting/checkerboard_plot/#checkerboard-plot","text":"Function to plot a checkerboard plot / heat map via matplotlib from mlxtend.plotting import checkerboard_plot","title":"Checkerboard Plot"},{"location":"user_guide/plotting/checkerboard_plot/#overview","text":"Function to plot a checkerboard plot / heat map via matplotlib.","title":"Overview"},{"location":"user_guide/plotting/checkerboard_plot/#references","text":"-","title":"References"},{"location":"user_guide/plotting/checkerboard_plot/#example-1-default","text":"from mlxtend.plotting import checkerboard_plot import matplotlib.pyplot as plt import numpy as np ary = np.random.random((5, 4)) brd = checkerboard_plot(ary) plt.show()","title":"Example 1 - Default"},{"location":"user_guide/plotting/checkerboard_plot/#example-2-changing-colors-and-labels","text":"from mlxtend.plotting import checkerboard_plot import matplotlib.pyplot as plt import numpy as np checkerboard_plot(ary, col_labels=['abc', 'def', 'ghi', 'jkl'], row_labels=['sample %d' % i for i in range(1, 6)], cell_colors=['skyblue', 'whitesmoke'], font_colors=['black', 'black'], figsize=(4.5, 5)) plt.show()","title":"Example 2 - Changing colors and labels"},{"location":"user_guide/plotting/checkerboard_plot/#api","text":"checkerboard_plot(ary, cell_colors=('white', 'black'), font_colors=('black', 'white'), fmt='%.1f', figsize=None, row_labels=None, col_labels=None, fontsize=None) Plot a checkerboard table / heatmap via matplotlib. Parameters ary : array-like, shape = [n, m] A 2D NumPy array. cell_colors : tuple or list (default: ('white', 'black')) Tuple or list containing the two colors of the checkerboard pattern. font_colors : tuple or list (default: ('black', 'white')) Font colors corresponding to the cell colors. figsize : tuple (default: (2.5, 2.5)) Height and width of the figure fmt : str (default: '%.1f') Python string formatter for cell values. The default '%.1f' results in floats with 1 digit after the decimal point. Use '%d' to show numbers as integers. row_labels : list (default: None) List of the row labels. Uses the array row indices 0 to n by default. col_labels : list (default: None) List of the column labels. Uses the array column indices 0 to m by default. fontsize : int (default: None) Specifies the font size of the checkerboard table. Uses matplotlib's default if None. Returns fig : matplotlib Figure object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/checkerboard_plot/","title":"API"},{"location":"user_guide/plotting/ecdf/","text":"Empirical Cumulative Distribution Function Plot A function to conveniently plot an empirical cumulative distribution function. from mlxtend.plotting import ecdf Overview A function to conveniently plot an empirical cumulative distribution function (ECDF) and add percentile thresholds for exploratory data analysis.
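The quantity being plotted is straightforward to compute by hand; a minimal NumPy sketch of the ECDF values (illustrative only and independent of the mlxtend implementation):

import numpy as np

x = np.array([5.1, 4.9, 4.7, 5.0, 5.4])  # toy feature values
xs = np.sort(x)
# fraction of samples less than or equal to each sorted value
ys = np.arange(1, len(xs) + 1) / len(xs)
print(list(zip(xs, ys)))  # (4.7, 0.2), (4.9, 0.4), ..., (5.4, 1.0)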
References - Example 1 - ECDF from mlxtend.data import iris_data from mlxtend.plotting import ecdf import matplotlib.pyplot as plt X, y = iris_data() ax, _, _ = ecdf(x=X[:, 0], x_label='sepal length (cm)') plt.show() Example 2 - Multiple ECDFs from mlxtend.data import iris_data from mlxtend.plotting import ecdf import matplotlib.pyplot as plt X, y = iris_data() # first ecdf x1 = X[:, 0] ax, _, _ = ecdf(x1, x_label='cm') # second ecdf x2 = X[:, 1] ax, _, _ = ecdf(x2, ax=ax) plt.legend(['sepal length', 'sepal width']) plt.show() Example 3 - ECDF with Percentile Thresholds from mlxtend.data import iris_data from mlxtend.plotting import ecdf import matplotlib.pyplot as plt X, y = iris_data() ax, threshold, count = ecdf(x=X[:, 0], x_label='sepal length (cm)', percentile=0.8) plt.show() print('Feature threshold at the 80th percentile:', threshold) print('Number of samples below the threshold:', count) Feature threshold at the 80th percentile: 6.5 Number of samples below the threshold: 120 API ecdf(x, y_label='ECDF', x_label=None, ax=None, percentile=None, ecdf_color=None, ecdf_marker='o', percentile_color='black', percentile_linestyle='--') Plots an Empirical Cumulative Distribution Function Parameters x : array or list, shape=[n_samples,] Array-like object containing the feature values y_label : str (default='ECDF') Text label for the y-axis x_label : str (default=None) Text label for the x-axis ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None percentile : float (default=None) Float between 0 and 1 for plotting a percentile threshold line ecdf_color : matplotlib color (default=None) Color for the ECDF plot; uses matplotlib defaults if None ecdf_marker : matplotlib marker (default='o') Marker style for the ECDF plot percentile_color : matplotlib color (default='black') Color for the percentile threshold if percentile is not None percentile_linestyle : matplotlib linestyle (default='--') Line style for the percentile threshold if percentile is not None Returns ax : matplotlib.axes.Axes object percentile_threshold : float Feature threshold at the percentile or None if percentile=None percentile_count : int or None Number of samples with a feature value less than or equal to the feature threshold at the percentile, or None if percentile=None Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/ecdf/","title":"Empirical Cumulative Distribution Function Plot"},{"location":"user_guide/plotting/ecdf/#empirical-cumulative-distribution-function-plot","text":"A function to conveniently plot an empirical cumulative distribution function.
from mlxtend.plotting import ecdf","title":"Empirical Cumulative Distribution Function Plot"},{"location":"user_guide/plotting/ecdf/#overview","text":"A function to conveniently plot an empirical cumulative distribution function (ECDF) and add percentile thresholds for exploratory data analysis.","title":"Overview"},{"location":"user_guide/plotting/ecdf/#references","text":"-","title":"References"},{"location":"user_guide/plotting/ecdf/#example-1-ecdf","text":"from mlxtend.data import iris_data from mlxtend.plotting import ecdf import matplotlib.pyplot as plt X, y = iris_data() ax, _, _ = ecdf(x=X[:, 0], x_label='sepal length (cm)') plt.show()","title":"Example 1 - ECDF"},{"location":"user_guide/plotting/ecdf/#example-2-multiple-ecdfs","text":"from mlxtend.data import iris_data from mlxtend.plotting import ecdf import matplotlib.pyplot as plt X, y = iris_data() # first ecdf x1 = X[:, 0] ax, _, _ = ecdf(x1, x_label='cm') # second ecdf x2 = X[:, 1] ax, _, _ = ecdf(x2, ax=ax) plt.legend(['sepal length', 'sepal width']) plt.show()","title":"Example 2 - Multiple ECDFs"},{"location":"user_guide/plotting/ecdf/#example-3-ecdf-with-percentile-thresholds","text":"from mlxtend.data import iris_data from mlxtend.plotting import ecdf import matplotlib.pyplot as plt X, y = iris_data() ax, threshold, count = ecdf(x=X[:, 0], x_label='sepal length (cm)', percentile=0.8) plt.show() print('Feature threshold at the 80th percentile:', threshold) print('Number of samples below the threshold:', count) Feature threshold at the 80th percentile: 6.5 Number of samples below the threshold: 120","title":"Example 3 - ECDF with Percentile Thresholds"},{"location":"user_guide/plotting/ecdf/#api","text":"ecdf(x, y_label='ECDF', x_label=None, ax=None, percentile=None, ecdf_color=None, ecdf_marker='o', percentile_color='black', percentile_linestyle='--') Plots an Empirical Cumulative Distribution Function Parameters x : array or list, shape=[n_samples,] Array-like object containing the feature values y_label : str (default='ECDF') Text label for the y-axis x_label : str (default=None) Text label for the x-axis ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None percentile : float (default=None) Float between 0 and 1 for plotting a percentile threshold line ecdf_color : matplotlib color (default=None) Color for the ECDF plot; uses matplotlib defaults if None ecdf_marker : matplotlib marker (default='o') Marker style for the ECDF plot percentile_color : matplotlib color (default='black') Color for the percentile threshold if percentile is not None percentile_linestyle : matplotlib linestyle (default='--') Line style for the percentile threshold if percentile is not None Returns ax : matplotlib.axes.Axes object percentile_threshold : float Feature threshold at the percentile or None if percentile=None percentile_count : int or None Number of samples with a feature value less than or equal to the feature threshold at the percentile, or None if percentile=None Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/ecdf/","title":"API"},{"location":"user_guide/plotting/enrichment_plot/","text":"Enrichment Plot A function to plot step plots of cumulative counts.
from mlxtend.plotting import enrichment_plot Overview In enrichment plots, the y-axis can be interpreted as \"how many samples are less than or equal to the corresponding x-axis label.\" References - Example 1 - Enrichment Plots from Pandas DataFrames import pandas as pd s1 = [1.1, 1.5] s2 = [2.1, 1.8] s3 = [3.1, 2.1] s4 = [3.9, 2.5] data = [s1, s2, s3, s4] df = pd.DataFrame(data, columns=['X1', 'X2']) df X1 X2 0 1.1 1.5 1 2.1 1.8 2 3.1 2.1 3 3.9 2.5 Plotting the enrichment plot, where each column of the DataFrame is drawn as its own cumulative step curve. import matplotlib.pyplot as plt from mlxtend.plotting import enrichment_plot ax = enrichment_plot(df, legend_loc='upper left') API enrichment_plot(df, colors='bgrkcy', markers=' ', linestyles='-', alpha=0.5, lw=2, where='post', grid=True, count_label='Count', xlim='auto', ylim='auto', invert_axes=False, legend_loc='best', ax=None) Plot an enrichment plot (step plots of cumulative counts). Parameters df : pandas.DataFrame A pandas DataFrame where columns represent the different categories. colors : str (default: 'bgrkcy') The colors of the step curves. markers : str (default: ' ') Matplotlib markerstyles, e.g., 'sov' for square, circle, and triangle markers. linestyles : str (default: '-') Matplotlib linestyles, e.g., '-,--' to cycle normal and dashed lines. Note that the different linestyles need to be separated by commas. alpha : float (default: 0.5) Transparency level from 0.0 to 1.0. lw : int or float (default: 2) Linewidth parameter. where : {'post', 'pre', 'mid'} (default: 'post') Starting location of the steps. grid : bool (default: True ) Plots a grid if True. count_label : str (default: 'Count') Label for the \"Count\"-axis. xlim : 'auto' or array-like [min, max] (default: 'auto') Minimum and maximum position of the x-axis range. ylim : 'auto' or array-like [min, max] (default: 'auto') Minimum and maximum position of the y-axis range. invert_axes : bool (default: False) Plots count on the x-axis if True. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False ax : matplotlib axis, optional (default: None) Use this axis for plotting or make a new one otherwise Returns ax : matplotlib axis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/enrichment_plot/","title":"Enrichment Plot"},{"location":"user_guide/plotting/enrichment_plot/#enrichment-plot","text":"A function to plot step plots of cumulative counts. from mlxtend.plotting import enrichment_plot","title":"Enrichment Plot"},{"location":"user_guide/plotting/enrichment_plot/#overview","text":"In enrichment plots, the y-axis can be interpreted as \"how many samples are less than or equal to the corresponding x-axis label.\"","title":"Overview"},{"location":"user_guide/plotting/enrichment_plot/#references","text":"-","title":"References"},{"location":"user_guide/plotting/enrichment_plot/#example-1-enrichment-plots-from-pandas-dataframes","text":"import pandas as pd s1 = [1.1, 1.5] s2 = [2.1, 1.8] s3 = [3.1, 2.1] s4 = [3.9, 2.5] data = [s1, s2, s3, s4] df = pd.DataFrame(data, columns=['X1', 'X2']) df X1 X2 0 1.1 1.5 1 2.1 1.8 2 3.1 2.1 3 3.9 2.5 Plotting the enrichment plot, where each column of the DataFrame is drawn as its own cumulative step curve.
import matplotlib.pyplot as plt from mlxtend.plotting import enrichment_plot ax = enrichment_plot(df, legend_loc='upper left')","title":"Example 1 - Enrichment Plots from Pandas DataFrames"},{"location":"user_guide/plotting/enrichment_plot/#api","text":"enrichment_plot(df, colors='bgrkcy', markers=' ', linestyles='-', alpha=0.5, lw=2, where='post', grid=True, count_label='Count', xlim='auto', ylim='auto', invert_axes=False, legend_loc='best', ax=None) Plot an enrichment plot (step plots of cumulative counts). Parameters df : pandas.DataFrame A pandas DataFrame where columns represent the different categories. colors : str (default: 'bgrkcy') The colors of the step curves. markers : str (default: ' ') Matplotlib markerstyles, e.g., 'sov' for square, circle, and triangle markers. linestyles : str (default: '-') Matplotlib linestyles, e.g., '-,--' to cycle normal and dashed lines. Note that the different linestyles need to be separated by commas. alpha : float (default: 0.5) Transparency level from 0.0 to 1.0. lw : int or float (default: 2) Linewidth parameter. where : {'post', 'pre', 'mid'} (default: 'post') Starting location of the steps. grid : bool (default: True ) Plots a grid if True. count_label : str (default: 'Count') Label for the \"Count\"-axis. xlim : 'auto' or array-like [min, max] (default: 'auto') Minimum and maximum position of the x-axis range. ylim : 'auto' or array-like [min, max] (default: 'auto') Minimum and maximum position of the y-axis range. invert_axes : bool (default: False) Plots count on the x-axis if True. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False ax : matplotlib axis, optional (default: None) Use this axis for plotting or make a new one otherwise Returns ax : matplotlib axis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/enrichment_plot/","title":"API"},{"location":"user_guide/plotting/plot_confusion_matrix/","text":"Confusion Matrix Utility function for visualizing confusion matrices via matplotlib from mlxtend.plotting import plot_confusion_matrix Overview Confusion Matrix For more information on confusion matrices, please see mlxtend.evaluate.confusion_matrix . References - Example 1 - Binary from mlxtend.plotting import plot_confusion_matrix import matplotlib.pyplot as plt import numpy as np binary = np.array([[4, 1], [1, 2]]) fig, ax = plot_confusion_matrix(conf_mat=binary) plt.show() Example 2 - Binary absolute and relative with colorbar binary = np.array([[4, 1], [1, 2]]) fig, ax = plot_confusion_matrix(conf_mat=binary, show_absolute=True, show_normed=True, colorbar=True) plt.show() Example 3 - Multiclass relative multiclass = np.array([[2, 1, 0, 0], [1, 2, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) fig, ax = plot_confusion_matrix(conf_mat=multiclass, colorbar=True, show_absolute=False, show_normed=True) plt.show() API plot_confusion_matrix(conf_mat, hide_spines=False, hide_ticks=False, figsize=None, cmap=None, colorbar=False, show_absolute=True, show_normed=False) Plot a confusion matrix via matplotlib. Parameters conf_mat : array-like, shape = [n_classes, n_classes] Confusion matrix from evaluate.confusion_matrix. hide_spines : bool (default: False) Hides axis spines if True.
hide_ticks : bool (default: False) Hides axis ticks if True figsize : tuple (default: (2.5, 2.5)) Height and width of the figure cmap : matplotlib colormap (default: None ) Uses matplotlib.pyplot.cm.Blues if None colorbar : bool (default: False) Shows a colorbar if True show_absolute : bool (default: True) Shows absolute confusion matrix coefficients if True. At least one of show_absolute or show_normed must be True. show_normed : bool (default: False) Shows normed confusion matrix coefficients if True. The normed confusion matrix coefficients give the proportion of training examples per class that are assigned the correct label. At least one of show_absolute or show_normed must be True. Returns fig, ax : matplotlib.pyplot subplot objects Figure and axis elements of the subplot. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_confusion_matrix/","title":"Confusion Matrix"},{"location":"user_guide/plotting/plot_confusion_matrix/#confusion-matrix","text":"Utility function for visualizing confusion matrices via matplotlib from mlxtend.plotting import plot_confusion_matrix","title":"Confusion Matrix"},{"location":"user_guide/plotting/plot_confusion_matrix/#overview","text":"","title":"Overview"},{"location":"user_guide/plotting/plot_confusion_matrix/#confusion-matrix_1","text":"For more information on confusion matrices, please see mlxtend.evaluate.confusion_matrix .","title":"Confusion Matrix"},{"location":"user_guide/plotting/plot_confusion_matrix/#references","text":"-","title":"References"},{"location":"user_guide/plotting/plot_confusion_matrix/#example-1-binary","text":"from mlxtend.plotting import plot_confusion_matrix import matplotlib.pyplot as plt import numpy as np binary = np.array([[4, 1], [1, 2]]) fig, ax = plot_confusion_matrix(conf_mat=binary) plt.show()","title":"Example 1 - Binary"},{"location":"user_guide/plotting/plot_confusion_matrix/#example-2-binary-absolute-and-relative-with-colorbar","text":"binary = np.array([[4, 1], [1, 2]]) fig, ax = plot_confusion_matrix(conf_mat=binary, show_absolute=True, show_normed=True, colorbar=True) plt.show()","title":"Example 2 - Binary absolute and relative with colorbar"},{"location":"user_guide/plotting/plot_confusion_matrix/#example-3-multiclass-relative","text":"multiclass = np.array([[2, 1, 0, 0], [1, 2, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) fig, ax = plot_confusion_matrix(conf_mat=multiclass, colorbar=True, show_absolute=False, show_normed=True) plt.show()","title":"Example 3 - Multiclass relative"},{"location":"user_guide/plotting/plot_confusion_matrix/#api","text":"plot_confusion_matrix(conf_mat, hide_spines=False, hide_ticks=False, figsize=None, cmap=None, colorbar=False, show_absolute=True, show_normed=False) Plot a confusion matrix via matplotlib. Parameters conf_mat : array-like, shape = [n_classes, n_classes] Confusion matrix from evaluate.confusion matrix. hide_spines : bool (default: False) Hides axis spines if True. hide_ticks : bool (default: False) Hides axis ticks if True figsize : tuple (default: (2.5, 2.5)) Height and width of the figure cmap : matplotlib colormap (default: None ) Uses matplotlib.pyplot.cm.Blues if None colorbar : bool (default: False) Shows a colorbar if True show_absolute : bool (default: True) Shows absolute confusion matrix coefficients if True. At least one of show_absolute or show_normed must be True. show_normed : bool (default: False) Shows normed confusion matrix coefficients if True. 
The normed confusion matrix coefficients give the proportion of training examples per class that are assigned the correct label. At least one of show_absolute or show_normed must be True. Returns fig, ax : matplotlib.pyplot subplot objects Figure and axis elements of the subplot. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_confusion_matrix/","title":"API"},{"location":"user_guide/plotting/plot_decision_regions/","text":"Plotting Decision Regions A function for plotting decision regions of classifiers in 1 or 2 dimensions. from mlxtend.plotting import plot_decision_regions References Example 1 - Decision regions in 2D from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X = iris.data[:, [0, 2]] y = iris.target # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X, y) # Plotting decision regions plot_decision_regions(X, y, clf=svm, legend=2) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.ylabel('petal length [cm]') plt.title('SVM on Iris') plt.show() Example 2 - Decision regions in 1D from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X = iris.data[:, 2] X = X[:, None] y = iris.target # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X, y) # Plotting decision regions plot_decision_regions(X, y, clf=svm, legend=2) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.title('SVM on Iris') plt.show() Example 3 - Decision Region Grids from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn import datasets import numpy as np # Initializing Classifiers clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() clf4 = SVC() # Loading some example data iris = datasets.load_iris() X = iris.data[:, [0,2]] y = iris.target import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions import matplotlib.gridspec as gridspec import itertools gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM'] for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show() /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning. FutureWarning) /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:459: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning. \"this warning.\", FutureWarning) /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/ensemble/forest.py:248: FutureWarning: The default value of n_estimators will change from 10 in version 0.20 to 100 in 0.22. 
\"10 in version 0.20 to 100 in 0.22.\", FutureWarning) /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. \"avoid this warning.\", FutureWarning) Example 4 - Highlighting Test Data Points from mlxtend.plotting import plot_decision_regions from mlxtend.preprocessing import shuffle_arrays_unison import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X, y = iris.data[:, [0,2]], iris.target X, y = shuffle_arrays_unison(arrays=[X, y], random_seed=3) X_train, y_train = X[:100], y[:100] X_test, y_test = X[100:], y[100:] # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X_train, y_train) # Plotting decision regions plot_decision_regions(X, y, clf=svm, legend=2, X_highlight=X_test) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.ylabel('petal length [cm]') plt.title('SVM on Iris') plt.show() Example 5 - Evaluating Classifier Behavior on Non-Linear Problems from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC # Initializing Classifiers clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(n_estimators=100, random_state=1) clf3 = GaussianNB() clf4 = SVC() # Loading Plotting Utilities import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import itertools from mlxtend.plotting import plot_decision_regions import numpy as np XOR xx, yy = np.meshgrid(np.linspace(-3, 3, 50), np.linspace(-3, 3, 50)) rng = np.random.RandomState(0) X = rng.randn(300, 2) y = np.array(np.logical_xor(X[:, 0] > 0, X[:, 1] > 0), dtype=int) gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM'] for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show() /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning. FutureWarning) /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. \"avoid this warning.\", FutureWarning) Half-Moons from sklearn.datasets import make_moons X, y = make_moons(n_samples=100, random_state=123) gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM'] for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show() /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning. 
FutureWarning) /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. \"avoid this warning.\", FutureWarning) Concentric Circles from sklearn.datasets import make_circles X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2) gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM'] for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show() /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning. FutureWarning) /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. \"avoid this warning.\", FutureWarning) Example 6 - Working with existing axes objects using subplots import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn import datasets import numpy as np # Loading some example data iris = datasets.load_iris() X = iris.data[:, 2] X = X[:, None] y = iris.target # Initializing and fitting classifiers clf1 = LogisticRegression(random_state=1) clf2 = GaussianNB() clf1.fit(X, y) clf2.fit(X, y) fig, axes = plt.subplots(1, 2, figsize=(10, 3)) fig = plot_decision_regions(X=X, y=y, clf=clf1, ax=axes[0], legend=2) fig = plot_decision_regions(X=X, y=y, clf=clf2, ax=axes[1], legend=1) plt.show() /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning. FutureWarning) /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:459: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning. 
\"this warning.\", FutureWarning) Example 7 - Decision regions with more than two training features from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data X, y = datasets.make_blobs(n_samples=600, n_features=3, centers=[[2, 2, -2],[-2, -2, 2]], cluster_std=[2, 2], random_state=2) # Training a classifier svm = SVC() svm.fit(X, y) # Plotting decision regions fig, ax = plt.subplots() # Decision region for feature 3 = 1.5 value = 1.5 # Plot training sample with feature 3 = 1.5 +/- 0.75 width = 0.75 plot_decision_regions(X, y, clf=svm, filler_feature_values={2: value}, filler_feature_ranges={2: width}, legend=2, ax=ax) ax.set_xlabel('Feature 1') ax.set_ylabel('Feature 2') ax.set_title('Feature 3 = {}'.format(value)) # Adding axes annotations fig.suptitle('SVM on make_blobs') plt.show() /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. \"avoid this warning.\", FutureWarning) Example 8 - Grid of decision region slices from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data X, y = datasets.make_blobs(n_samples=500, n_features=3, centers=[[2, 2, -2],[-2, -2, 2]], cluster_std=[2, 2], random_state=2) # Training a classifier svm = SVC() svm.fit(X, y) # Plotting decision regions fig, axarr = plt.subplots(2, 2, figsize=(10,8), sharex=True, sharey=True) values = [-4.0, -1.0, 1.0, 4.0] width = 0.75 for value, ax in zip(values, axarr.flat): plot_decision_regions(X, y, clf=svm, filler_feature_values={2: value}, filler_feature_ranges={2: width}, legend=2, ax=ax) ax.set_xlabel('Feature 1') ax.set_ylabel('Feature 2') ax.set_title('Feature 3 = {}'.format(value)) # Adding axes annotations fig.suptitle('SVM on make_blobs') plt.show() /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. 
\"avoid this warning.\", FutureWarning) Example 9 - Customizing the plotting style from mlxtend.plotting import plot_decision_regions from mlxtend.preprocessing import shuffle_arrays_unison import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X = iris.data[:, [0, 2]] y = iris.target X, y = shuffle_arrays_unison(arrays=[X, y], random_seed=3) X_train, y_train = X[:100], y[:100] X_test, y_test = X[100:], y[100:] # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X_train, y_train) # Specify keyword arguments to be passed to underlying plotting functions scatter_kwargs = {'s': 120, 'edgecolor': None, 'alpha': 0.7} contourf_kwargs = {'alpha': 0.2} scatter_highlight_kwargs = {'s': 120, 'label': 'Test data', 'alpha': 0.7} # Plotting decision regions plot_decision_regions(X, y, clf=svm, legend=2, X_highlight=X_test, scatter_kwargs=scatter_kwargs, contourf_kwargs=contourf_kwargs, scatter_highlight_kwargs=scatter_highlight_kwargs) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.ylabel('petal length [cm]') plt.title('SVM on Iris') plt.show() Example 10 - Providing your own legend labels Custom legend labels can be provided by returning the axis object(s) from the plot_decision_region function and then getting the handles and labels of the legend. Custom handles (i.e., labels) can then be provided via ax.legend ax = plot_decision_regions(X, y, clf=svm, legend=0) handles, labels = ax.get_legend_handles_labels() ax.legend(handles, ['class 0', 'class 1', 'class 2'], framealpha=0.3, scatterpoints=1) An example is shown below. from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X = iris.data[:, [0, 2]] y = iris.target # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X, y) # Plotting decision regions ax = plot_decision_regions(X, y, clf=svm, legend=0) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.ylabel('petal length [cm]') plt.title('SVM on Iris') handles, labels = ax.get_legend_handles_labels() ax.legend(handles, ['class square', 'class triangle', 'class circle'], framealpha=0.3, scatterpoints=1) plt.show() API plot_decision_regions(X, y, clf, feature_index=None, filler_feature_values=None, filler_feature_ranges=None, ax=None, X_highlight=None, res=None, legend=1, hide_spines=True, markers='s^oxv<>', colors='#1f77b4,#ff7f0e,#3ca02c,#d62728,#9467bd,#8c564b,#e377c2,#7f7f7f,#bcbd22,#17becf', scatter_kwargs=None, contourf_kwargs=None, scatter_highlight_kwargs=None) Plot decision regions of a classifier. Please note that this functions assumes that class labels are labeled consecutively, e.g,. 0, 1, 2, 3, 4, and 5. If you have class labels with integer labels > 4, you may want to provide additional colors and/or markers as colors and markers arguments. See http://matplotlib.org/examples/color/named_colors.html for more information. Parameters X : array-like, shape = [n_samples, n_features] Feature Matrix. y : array-like, shape = [n_samples] True class labels. clf : Classifier object. Must have a .predict method. feature_index : array-like (default: (0,) for 1D, (0, 1) otherwise) Feature indices to use for plotting. The first index in feature_index will be on the x-axis, the second index will be on the y-axis. filler_feature_values : dict (default: None) Only needed for number features > 2. 
Dictionary of feature index-value pairs for the features not being plotted. filler_feature_ranges : dict (default: None) Only needed for number features > 2. Dictionary of feature index-value pairs for the features not being plotted. Will use the ranges provided to select training samples for plotting. ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None. X_highlight : array-like, shape = [n_samples, n_features] (default: None) An array with data points that are used to highlight samples in X . res : float or array-like, shape = (2,) (default: None) This parameter was used to define the grid width, but it has been deprecated in favor of determining the number of points given the figure DPI and size automatically for optimal results and computational efficiency. To increase the resolution, it's is recommended to use to provide a dpi argument via matplotlib, e.g., plt.figure(dpi=600)`. hide_spines : bool (default: True) Hide axis spines if True. legend : int (default: 1) Integer to specify the legend location. No legend if legend is 0. markers : str (default: 's^oxv<>') Scatterplot markers. colors : str (default: 'red,blue,limegreen,gray,cyan') Comma separated list of colors. scatter_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function. contourf_kwargs : dict (default: None) Keyword arguments for underlying matplotlib contourf function. scatter_highlight_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function. Returns ax : matplotlib.axes.Axes object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_decision_regions/","title":"Plotting Decision Regions"},{"location":"user_guide/plotting/plot_decision_regions/#plotting-decision-regions","text":"A function for plotting decision regions of classifiers in 1 or 2 dimensions. 
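As a hedged sketch of how feature_index and the filler_feature arguments interact (this is not from the original page; the filler values below are rough Iris feature means picked for illustration): with four training features, two are selected for the plot axes and the remaining two are held fixed at filler values:

```python
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.svm import SVC
from mlxtend.plotting import plot_decision_regions

# Iris has four features; train on all of them
iris = datasets.load_iris()
X, y = iris.data, iris.target

svm = SVC(C=0.5, kernel='linear')
svm.fit(X, y)

# Plot features 0 and 3; hold features 1 and 2 fixed near their means,
# selecting training points within the given ranges for the scatter plot
plot_decision_regions(X, y, clf=svm,
                      feature_index=(0, 3),              # x-axis, y-axis
                      filler_feature_values={1: 3.0, 2: 4.0},
                      filler_feature_ranges={1: 2.0, 2: 3.0},
                      legend=2)
plt.xlabel('sepal length [cm]')
plt.ylabel('petal width [cm]')
plt.show()
```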
from mlxtend.plotting import plot_decision_regions","title":"Plotting Decision Regions"},{"location":"user_guide/plotting/plot_decision_regions/#references","text":"","title":"References"},{"location":"user_guide/plotting/plot_decision_regions/#example-1-decision-regions-in-2d","text":"from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X = iris.data[:, [0, 2]] y = iris.target # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X, y) # Plotting decision regions plot_decision_regions(X, y, clf=svm, legend=2) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.ylabel('petal length [cm]') plt.title('SVM on Iris') plt.show()","title":"Example 1 - Decision regions in 2D"},{"location":"user_guide/plotting/plot_decision_regions/#example-2-decision-regions-in-1d","text":"from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X = iris.data[:, 2] X = X[:, None] y = iris.target # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X, y) # Plotting decision regions plot_decision_regions(X, y, clf=svm, legend=2) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.title('SVM on Iris') plt.show()","title":"Example 2 - Decision regions in 1D"},{"location":"user_guide/plotting/plot_decision_regions/#example-3-decision-region-grids","text":"from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn import datasets import numpy as np # Initializing Classifiers clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() clf4 = SVC() # Loading some example data iris = datasets.load_iris() X = iris.data[:, [0,2]] y = iris.target import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions import matplotlib.gridspec as gridspec import itertools gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM'] for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show() /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning. FutureWarning) /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:459: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning. \"this warning.\", FutureWarning) /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/ensemble/forest.py:248: FutureWarning: The default value of n_estimators will change from 10 in version 0.20 to 100 in 0.22. \"10 in version 0.20 to 100 in 0.22.\", FutureWarning) /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. 
\"avoid this warning.\", FutureWarning)","title":"Example 3 - Decision Region Grids"},{"location":"user_guide/plotting/plot_decision_regions/#example-4-highlighting-test-data-points","text":"from mlxtend.plotting import plot_decision_regions from mlxtend.preprocessing import shuffle_arrays_unison import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X, y = iris.data[:, [0,2]], iris.target X, y = shuffle_arrays_unison(arrays=[X, y], random_seed=3) X_train, y_train = X[:100], y[:100] X_test, y_test = X[100:], y[100:] # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X_train, y_train) # Plotting decision regions plot_decision_regions(X, y, clf=svm, legend=2, X_highlight=X_test) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.ylabel('petal length [cm]') plt.title('SVM on Iris') plt.show()","title":"Example 4 - Highlighting Test Data Points"},{"location":"user_guide/plotting/plot_decision_regions/#example-5-evaluating-classifier-behavior-on-non-linear-problems","text":"from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC # Initializing Classifiers clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(n_estimators=100, random_state=1) clf3 = GaussianNB() clf4 = SVC() # Loading Plotting Utilities import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import itertools from mlxtend.plotting import plot_decision_regions import numpy as np","title":"Example 5 - Evaluating Classifier Behavior on Non-Linear Problems"},{"location":"user_guide/plotting/plot_decision_regions/#xor","text":"xx, yy = np.meshgrid(np.linspace(-3, 3, 50), np.linspace(-3, 3, 50)) rng = np.random.RandomState(0) X = rng.randn(300, 2) y = np.array(np.logical_xor(X[:, 0] > 0, X[:, 1] > 0), dtype=int) gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM'] for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show() /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning. FutureWarning) /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. 
\"avoid this warning.\", FutureWarning)","title":"XOR"},{"location":"user_guide/plotting/plot_decision_regions/#half-moons","text":"from sklearn.datasets import make_moons X, y = make_moons(n_samples=100, random_state=123) gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM'] for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show() /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning. FutureWarning) /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. \"avoid this warning.\", FutureWarning)","title":"Half-Moons"},{"location":"user_guide/plotting/plot_decision_regions/#concentric-circles","text":"from sklearn.datasets import make_circles X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2) gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM'] for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show() /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning. FutureWarning) /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. \"avoid this warning.\", FutureWarning)","title":"Concentric Circles"},{"location":"user_guide/plotting/plot_decision_regions/#example-6-working-with-existing-axes-objects-using-subplots","text":"import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn import datasets import numpy as np # Loading some example data iris = datasets.load_iris() X = iris.data[:, 2] X = X[:, None] y = iris.target # Initializing and fitting classifiers clf1 = LogisticRegression(random_state=1) clf2 = GaussianNB() clf1.fit(X, y) clf2.fit(X, y) fig, axes = plt.subplots(1, 2, figsize=(10, 3)) fig = plot_decision_regions(X=X, y=y, clf=clf1, ax=axes[0], legend=2) fig = plot_decision_regions(X=X, y=y, clf=clf2, ax=axes[1], legend=1) plt.show() /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning. FutureWarning) /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:459: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning. 
\"this warning.\", FutureWarning)","title":"Example 6 - Working with existing axes objects using subplots"},{"location":"user_guide/plotting/plot_decision_regions/#example-7-decision-regions-with-more-than-two-training-features","text":"from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data X, y = datasets.make_blobs(n_samples=600, n_features=3, centers=[[2, 2, -2],[-2, -2, 2]], cluster_std=[2, 2], random_state=2) # Training a classifier svm = SVC() svm.fit(X, y) # Plotting decision regions fig, ax = plt.subplots() # Decision region for feature 3 = 1.5 value = 1.5 # Plot training sample with feature 3 = 1.5 +/- 0.75 width = 0.75 plot_decision_regions(X, y, clf=svm, filler_feature_values={2: value}, filler_feature_ranges={2: width}, legend=2, ax=ax) ax.set_xlabel('Feature 1') ax.set_ylabel('Feature 2') ax.set_title('Feature 3 = {}'.format(value)) # Adding axes annotations fig.suptitle('SVM on make_blobs') plt.show() /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. \"avoid this warning.\", FutureWarning)","title":"Example 7 - Decision regions with more than two training features"},{"location":"user_guide/plotting/plot_decision_regions/#example-8-grid-of-decision-region-slices","text":"from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data X, y = datasets.make_blobs(n_samples=500, n_features=3, centers=[[2, 2, -2],[-2, -2, 2]], cluster_std=[2, 2], random_state=2) # Training a classifier svm = SVC() svm.fit(X, y) # Plotting decision regions fig, axarr = plt.subplots(2, 2, figsize=(10,8), sharex=True, sharey=True) values = [-4.0, -1.0, 1.0, 4.0] width = 0.75 for value, ax in zip(values, axarr.flat): plot_decision_regions(X, y, clf=svm, filler_feature_values={2: value}, filler_feature_ranges={2: width}, legend=2, ax=ax) ax.set_xlabel('Feature 1') ax.set_ylabel('Feature 2') ax.set_title('Feature 3 = {}'.format(value)) # Adding axes annotations fig.suptitle('SVM on make_blobs') plt.show() /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning. 
\"avoid this warning.\", FutureWarning)","title":"Example 8 - Grid of decision region slices"},{"location":"user_guide/plotting/plot_decision_regions/#example-9-customizing-the-plotting-style","text":"from mlxtend.plotting import plot_decision_regions from mlxtend.preprocessing import shuffle_arrays_unison import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X = iris.data[:, [0, 2]] y = iris.target X, y = shuffle_arrays_unison(arrays=[X, y], random_seed=3) X_train, y_train = X[:100], y[:100] X_test, y_test = X[100:], y[100:] # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X_train, y_train) # Specify keyword arguments to be passed to underlying plotting functions scatter_kwargs = {'s': 120, 'edgecolor': None, 'alpha': 0.7} contourf_kwargs = {'alpha': 0.2} scatter_highlight_kwargs = {'s': 120, 'label': 'Test data', 'alpha': 0.7} # Plotting decision regions plot_decision_regions(X, y, clf=svm, legend=2, X_highlight=X_test, scatter_kwargs=scatter_kwargs, contourf_kwargs=contourf_kwargs, scatter_highlight_kwargs=scatter_highlight_kwargs) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.ylabel('petal length [cm]') plt.title('SVM on Iris') plt.show()","title":"Example 9 - Customizing the plotting style"},{"location":"user_guide/plotting/plot_decision_regions/#example-10-providing-your-own-legend-labels","text":"Custom legend labels can be provided by returning the axis object(s) from the plot_decision_region function and then getting the handles and labels of the legend. Custom handles (i.e., labels) can then be provided via ax.legend ax = plot_decision_regions(X, y, clf=svm, legend=0) handles, labels = ax.get_legend_handles_labels() ax.legend(handles, ['class 0', 'class 1', 'class 2'], framealpha=0.3, scatterpoints=1) An example is shown below. from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X = iris.data[:, [0, 2]] y = iris.target # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X, y) # Plotting decision regions ax = plot_decision_regions(X, y, clf=svm, legend=0) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.ylabel('petal length [cm]') plt.title('SVM on Iris') handles, labels = ax.get_legend_handles_labels() ax.legend(handles, ['class square', 'class triangle', 'class circle'], framealpha=0.3, scatterpoints=1) plt.show()","title":"Example 10 - Providing your own legend labels"},{"location":"user_guide/plotting/plot_decision_regions/#api","text":"plot_decision_regions(X, y, clf, feature_index=None, filler_feature_values=None, filler_feature_ranges=None, ax=None, X_highlight=None, res=None, legend=1, hide_spines=True, markers='s^oxv<>', colors='#1f77b4,#ff7f0e,#3ca02c,#d62728,#9467bd,#8c564b,#e377c2,#7f7f7f,#bcbd22,#17becf', scatter_kwargs=None, contourf_kwargs=None, scatter_highlight_kwargs=None) Plot decision regions of a classifier. Please note that this functions assumes that class labels are labeled consecutively, e.g,. 0, 1, 2, 3, 4, and 5. If you have class labels with integer labels > 4, you may want to provide additional colors and/or markers as colors and markers arguments. See http://matplotlib.org/examples/color/named_colors.html for more information. Parameters X : array-like, shape = [n_samples, n_features] Feature Matrix. y : array-like, shape = [n_samples] True class labels. 
clf : Classifier object. Must have a .predict method. feature_index : array-like (default: (0,) for 1D, (0, 1) otherwise) Feature indices to use for plotting. The first index in feature_index will be on the x-axis, the second index will be on the y-axis. filler_feature_values : dict (default: None) Only needed for number features > 2. Dictionary of feature index-value pairs for the features not being plotted. filler_feature_ranges : dict (default: None) Only needed for number features > 2. Dictionary of feature index-value pairs for the features not being plotted. Will use the ranges provided to select training samples for plotting. ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None. X_highlight : array-like, shape = [n_samples, n_features] (default: None) An array with data points that are used to highlight samples in X . res : float or array-like, shape = (2,) (default: None) This parameter was used to define the grid width, but it has been deprecated in favor of determining the number of points given the figure DPI and size automatically for optimal results and computational efficiency. To increase the resolution, it's is recommended to use to provide a dpi argument via matplotlib, e.g., plt.figure(dpi=600)`. hide_spines : bool (default: True) Hide axis spines if True. legend : int (default: 1) Integer to specify the legend location. No legend if legend is 0. markers : str (default: 's^oxv<>') Scatterplot markers. colors : str (default: 'red,blue,limegreen,gray,cyan') Comma separated list of colors. scatter_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function. contourf_kwargs : dict (default: None) Keyword arguments for underlying matplotlib contourf function. scatter_highlight_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function. Returns ax : matplotlib.axes.Axes object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_decision_regions/","title":"API"},{"location":"user_guide/plotting/plot_learning_curves/","text":"Plotting Learning Curves A function to plot learning curves for classifiers. Learning curves are extremely useful to analyze if a model is suffering from over- or under-fitting (high variance or high bias). The function can be imported via from mlxtend.plotting import plot_learning_curves References - Example 1 from mlxtend.plotting import plot_learning_curves import matplotlib.pyplot as plt from mlxtend.data import iris_data from mlxtend.preprocessing import shuffle_arrays_unison from sklearn.neighbors import KNeighborsClassifier import numpy as np # Loading some example data X, y = iris_data() X, y = shuffle_arrays_unison(arrays=[X, y], random_seed=123) X_train, X_test = X[:100], X[100:] y_train, y_test = y[:100], y[100:] clf = KNeighborsClassifier(n_neighbors=5) plot_learning_curves(X_train, y_train, X_test, y_test, clf) plt.show() API plot_learning_curves(X_train, y_train, X_test, y_test, clf, train_marker='o', test_marker='^', scoring='misclassification error', suppress_plot=False, print_model=True, style='fivethirtyeight', legend_loc='best') Plots learning curves of a classifier. Parameters X_train : array-like, shape = [n_samples, n_features] Feature matrix of the training dataset. y_train : array-like, shape = [n_samples] True class labels of the training dataset. X_test : array-like, shape = [n_samples, n_features] Feature matrix of the test dataset. 
y_test : array-like, shape = [n_samples] True class labels of the test dataset. clf : Classifier object. Must have a .predict and a .fit method. train_marker : str (default: 'o') Marker for the training set line plot. test_marker : str (default: '^') Marker for the test set line plot. scoring : str (default: 'misclassification error') If not 'misclassification error', accepts the following metrics (from scikit-learn): {'accuracy', 'average_precision', 'f1_micro', 'f1_macro', 'f1_weighted', 'f1_samples', 'log_loss', 'precision', 'recall', 'roc_auc', 'adjusted_rand_score', 'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} suppress_plot : bool (default: False) Suppress matplotlib plots if True. Recommended for testing purposes. print_model : bool (default: True) Print model parameters in plot title if True. style : str (default: 'fivethirtyeight') Matplotlib style. legend_loc : str (default: 'best') Where to place the plot legend: {'best', 'upper left', 'upper right', 'lower left', 'lower right'} Returns errors : (training_error, test_error): tuple of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/learning_curves/","title":"Plotting Learning Curves"},{"location":"user_guide/plotting/plot_learning_curves/#plotting-learning-curves","text":"A function to plot learning curves for classifiers. Learning curves are extremely useful to analyze if a model is suffering from over- or under-fitting (high variance or high bias). The function can be imported via from mlxtend.plotting import plot_learning_curves","title":"Plotting Learning Curves"},{"location":"user_guide/plotting/plot_learning_curves/#references","text":"-","title":"References"},{"location":"user_guide/plotting/plot_learning_curves/#example-1","text":"from mlxtend.plotting import plot_learning_curves import matplotlib.pyplot as plt from mlxtend.data import iris_data from mlxtend.preprocessing import shuffle_arrays_unison from sklearn.neighbors import KNeighborsClassifier import numpy as np # Loading some example data X, y = iris_data() X, y = shuffle_arrays_unison(arrays=[X, y], random_seed=123) X_train, X_test = X[:100], X[100:] y_train, y_test = y[:100], y[100:] clf = KNeighborsClassifier(n_neighbors=5) plot_learning_curves(X_train, y_train, X_test, y_test, clf) plt.show()","title":"Example 1"},{"location":"user_guide/plotting/plot_learning_curves/#api","text":"plot_learning_curves(X_train, y_train, X_test, y_test, clf, train_marker='o', test_marker='^', scoring='misclassification error', suppress_plot=False, print_model=True, style='fivethirtyeight', legend_loc='best') Plots learning curves of a classifier. Parameters X_train : array-like, shape = [n_samples, n_features] Feature matrix of the training dataset. y_train : array-like, shape = [n_samples] True class labels of the training dataset. X_test : array-like, shape = [n_samples, n_features] Feature matrix of the test dataset. y_test : array-like, shape = [n_samples] True class labels of the test dataset. clf : Classifier object. Must have a .predict and a .fit method. train_marker : str (default: 'o') Marker for the training set line plot. test_marker : str (default: '^') Marker for the test set line plot.
scoring : str (default: 'misclassification error') If not 'misclassification error', accepts the following metrics (from scikit-learn): {'accuracy', 'average_precision', 'f1_micro', 'f1_macro', 'f1_weighted', 'f1_samples', 'log_loss', 'precision', 'recall', 'roc_auc', 'adjusted_rand_score', 'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} suppress_plot : bool (default: False) Suppress matplotlib plots if True. Recommended for testing purposes. print_model : bool (default: True) Print model parameters in plot title if True. style : str (default: 'fivethirtyeight') Matplotlib style. legend_loc : str (default: 'best') Where to place the plot legend: {'best', 'upper left', 'upper right', 'lower left', 'lower right'} Returns errors : (training_error, test_error): tuple of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/learning_curves/","title":"API"},{"location":"user_guide/plotting/plot_linear_regression/","text":"Linear Regression Plot A function to plot linear regression fits. from mlxtend.plotting import plot_linear_regression Overview The plot_linear_regression is a convenience function that uses scikit-learn's linear_model.LinearRegression to fit a linear model and SciPy's stats.pearsonr to calculate the correlation coefficient. References - Example 1 - Ordinary Least Squares Simple Linear Regression import matplotlib.pyplot as plt from mlxtend.plotting import plot_linear_regression import numpy as np X = np.array([4, 8, 13, 26, 31, 10, 8, 30, 18, 12, 20, 5, 28, 18, 6, 31, 12, 12, 27, 11, 6, 14, 25, 7, 13,4, 15, 21, 15]) y = np.array([14, 24, 22, 59, 66, 25, 18, 60, 39, 32, 53, 18, 55, 41, 28, 61, 35, 36, 52, 23, 19, 25, 73, 16, 32, 14, 31, 43, 34]) intercept, slope, corr_coeff = plot_linear_regression(X, y) plt.show() API plot_linear_regression(X, y, model=LinearRegression(copy_X=True, fit_intercept=True, n_jobs=1, normalize=False), corr_func='pearsonr', scattercolor='blue', fit_style='k--', legend=True, xlim='auto') Plot a linear regression line fit. Parameters X : numpy array, shape = [n_samples,] Samples. y : numpy array, shape (n_samples,) Target values. model: object (default: sklearn.linear_model.LinearRegression) Estimator object for regression. Must implement a .fit() and .predict() method. corr_func: str or function (default: 'pearsonr') Uses pearsonr from scipy.stats if corr_func='pearsonr' to compute the correlation coefficient. Otherwise, the corr_func parameter expects a function of the form func(<x-array>, <y-array>) as input, which is expected to return a tuple (<correlation_coefficient>, <some_unused_value>). scattercolor: string (default: blue) Color of scatter plot points. fit_style: string (default: k--) Style for the line fit. legend: bool (default: True) Plots a legend with the correlation coefficient, fit coefficient (slope), and intercept values. xlim: array-like (x_min, x_max) or 'auto' (default: 'auto') X-axis limits for the linear fit line. Returns regression_fit : tuple intercept, slope, corr_coeff (float, float, float) Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_linear_regression/","title":"Linear Regression Plot"},{"location":"user_guide/plotting/plot_linear_regression/#linear-regression-plot","text":"A function to plot linear regression fits.
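Since corr_func accepts any function of the form func(x, y) that returns a (coefficient, value) tuple, SciPy's spearmanr can be dropped in directly. A small sketch with made-up data:

```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import spearmanr
from mlxtend.plotting import plot_linear_regression

# Invented sample data for illustration
X = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
y = np.array([2.1, 3.9, 6.2, 8.1, 9.8, 12.2, 13.9, 16.1, 18.0])

# spearmanr has the expected func(x, y) -> (coefficient, p_value) form
intercept, slope, corr_coeff = plot_linear_regression(X, y,
                                                      corr_func=spearmanr)
plt.show()
```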
from mlxtend.plotting import plot_linear_regression","title":"Linear Regression Plot"},{"location":"user_guide/plotting/plot_linear_regression/#overview","text":"The plot_linear_regression is a convenience function that uses scikit-learn's linear_model.LinearRegression to fit a linear model and SciPy's stats.pearsonr to calculate the correlation coefficient.","title":"Overview"},{"location":"user_guide/plotting/plot_linear_regression/#references","text":"-","title":"References"},{"location":"user_guide/plotting/plot_linear_regression/#example-1-ordinary-least-squares-simple-linear-regression","text":"import matplotlib.pyplot as plt from mlxtend.plotting import plot_linear_regression import numpy as np X = np.array([4, 8, 13, 26, 31, 10, 8, 30, 18, 12, 20, 5, 28, 18, 6, 31, 12, 12, 27, 11, 6, 14, 25, 7, 13,4, 15, 21, 15]) y = np.array([14, 24, 22, 59, 66, 25, 18, 60, 39, 32, 53, 18, 55, 41, 28, 61, 35, 36, 52, 23, 19, 25, 73, 16, 32, 14, 31, 43, 34]) intercept, slope, corr_coeff = plot_linear_regression(X, y) plt.show()","title":"Example 1 - Ordinary Least Squares Simple Linear Regression"},{"location":"user_guide/plotting/plot_linear_regression/#api","text":"plot_linear_regression(X, y, model=LinearRegression(copy_X=True, fit_intercept=True, n_jobs=1, normalize=False), corr_func='pearsonr', scattercolor='blue', fit_style='k--', legend=True, xlim='auto') Plot a linear regression line fit. Parameters X : numpy array, shape = [n_samples,] Samples. y : numpy array, shape (n_samples,) Target values model: object (default: sklearn.linear_model.LinearRegression) Estimator object for regression. Must implement a .fit() and .predict() method. corr_func: str or function (default: 'pearsonr') Uses pearsonr from scipy.stats if corr_func='pearsonr'. to compute the regression slope. If not 'pearsonr', the corr_func , the corr_func parameter expects a function of the form func( , ) as inputs, which is expected to return a tuple (, ) . scattercolor: string (default: blue) Color of scatter plot points. fit_style: string (default: k--) Style for the line fit. legend: bool (default: True) Plots legend with corr_coeff coef., fit coef., and intercept values. xlim: array-like (x_min, x_max) or 'auto' (default: 'auto') X-axis limits for the linear line fit. Returns regression_fit : tuple intercept, slope, corr_coeff (float, float, float) Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_linear_regression/","title":"API"},{"location":"user_guide/plotting/plot_sequential_feature_selection/","text":"Plot Sequential Feature Selection A matplotlib utility function for visualizing results from feature_selection.SequentialFeatureSelector . from mlxtend.plotting import plot_sequential_feature_selection Overview for more information on sequential feature selection, please see feature_selection.SequentialFeatureSelector . Example 1 - Plotting the results from SequentialFeatureSelector from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs from mlxtend.feature_selection import SequentialFeatureSelector as SFS import matplotlib.pyplot as plt from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=4) sfs = SFS(knn, k_features=4, forward=True, floating=False, scoring='accuracy', cv=5) sfs = sfs.fit(X, y) fig1 = plot_sfs(sfs.get_metric_dict(), kind='std_dev') plt.ylim([0.8, 1]) plt.title('Sequential Forward Selection (w. 
StdDev)') plt.grid() plt.show() Features: 4/4 API plot_sequential_feature_selection(metric_dict, kind='std_dev', color='blue', bcolor='steelblue', marker='o', alpha=0.2, ylabel='Performance', confidence_interval=0.95) Plot feature selection results. Parameters metric_dict : mlxtend.SequentialFeatureSelector.get_metric_dict() object kind : str (default: \"std_dev\") The kind of error bar or confidence interval in {'std_dev', 'std_err', 'ci', None}. color : str (default: \"blue\") Color of the lineplot (accepts any matplotlib color name) bcolor : str (default: \"steelblue\"). Color of the error bars / confidence intervals (accepts any matplotlib color name). marker : str (default: \"o\") Marker of the line plot (accepts any matplotlib marker name). alpha : float in [0, 1] (default: 0.2) Transparency of the error bars / confidence intervals. ylabel : str (default: \"Performance\") Y-axis label. confidence_interval : float (default: 0.95) Confidence level if kind='ci' . Returns fig : matplotlib.pyplot.figure() object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_sequential_feature_selection/","title":"Plot Sequential Feature Selection"},{"location":"user_guide/plotting/plot_sequential_feature_selection/#plot-sequential-feature-selection","text":"A matplotlib utility function for visualizing results from feature_selection.SequentialFeatureSelector . from mlxtend.plotting import plot_sequential_feature_selection","title":"Plot Sequential Feature Selection"},{"location":"user_guide/plotting/plot_sequential_feature_selection/#overview","text":"for more information on sequential feature selection, please see feature_selection.SequentialFeatureSelector .","title":"Overview"},{"location":"user_guide/plotting/plot_sequential_feature_selection/#example-1-plotting-the-results-from-sequentialfeatureselector","text":"from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs from mlxtend.feature_selection import SequentialFeatureSelector as SFS import matplotlib.pyplot as plt from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=4) sfs = SFS(knn, k_features=4, forward=True, floating=False, scoring='accuracy', cv=5) sfs = sfs.fit(X, y) fig1 = plot_sfs(sfs.get_metric_dict(), kind='std_dev') plt.ylim([0.8, 1]) plt.title('Sequential Forward Selection (w. StdDev)') plt.grid() plt.show() Features: 4/4","title":"Example 1 - Plotting the results from SequentialFeatureSelector"},{"location":"user_guide/plotting/plot_sequential_feature_selection/#api","text":"plot_sequential_feature_selection(metric_dict, kind='std_dev', color='blue', bcolor='steelblue', marker='o', alpha=0.2, ylabel='Performance', confidence_interval=0.95) Plot feature selection results. Parameters metric_dict : mlxtend.SequentialFeatureSelector.get_metric_dict() object kind : str (default: \"std_dev\") The kind of error bar or confidence interval in {'std_dev', 'std_err', 'ci', None}. color : str (default: \"blue\") Color of the lineplot (accepts any matplotlib color name) bcolor : str (default: \"steelblue\"). Color of the error bars / confidence intervals (accepts any matplotlib color name). marker : str (default: \"o\") Marker of the line plot (accepts any matplotlib marker name). alpha : float in [0, 1] (default: 0.2) Transparency of the error bars / confidence intervals. ylabel : str (default: \"Performance\") Y-axis label. 
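A hedged variant of Example 1 (same setup, only the kind argument changes), showing standard-error bands instead of standard-deviation bands:

```python
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs

iris = load_iris()
X, y = iris.data, iris.target

knn = KNeighborsClassifier(n_neighbors=4)
sfs = SFS(knn, k_features=4, forward=True, floating=False,
          scoring='accuracy', cv=5).fit(X, y)

# kind='std_err' draws standard-error bands around the performance line
fig = plot_sfs(sfs.get_metric_dict(), kind='std_err')
plt.title('Sequential Forward Selection (w. StdErr)')
plt.grid()
plt.show()
```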
confidence_interval : float (default: 0.95) Confidence level if kind='ci' . Returns fig : matplotlib.pyplot.figure() object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_sequential_feature_selection/","title":"API"},{"location":"user_guide/plotting/scatterplotmatrix/","text":"Scatter Plot Matrix A function for conveniently plotting a scatter plot matrix in matplotlib. from mlxtend.plotting import scatterplotmatrix Overview A matplotlib convenience function for creating a scatterplot matrix. References - Example 1 - Simple Scatter Plot Matrix import matplotlib.pyplot as plt from mlxtend.data import iris_data from mlxtend.plotting import scatterplotmatrix X, y = iris_data() scatterplotmatrix(X, figsize=(10, 8)) plt.tight_layout() plt.show() Example 2 - Scatter Plot Matrix with Multiple Categories names = ['sepal length [cm]', 'sepal width [cm]', 'petal length [cm]', 'petal width [cm]'] fig, axes = scatterplotmatrix(X[y==0], figsize=(10, 8), alpha=0.5) fig, axes = scatterplotmatrix(X[y==1], fig_axes=(fig, axes), alpha=0.5) fig, axes = scatterplotmatrix(X[y==2], fig_axes=(fig, axes), alpha=0.5, names=names) plt.tight_layout() plt.show() API scatterplotmatrix(X, fig_axes=None, names=None, figsize=(8, 8), alpha=1.0, **kwargs) Plots the lower triangular of a scatterplot matrix. Parameters X : array-like, shape={num_examples, num_features} Design matrix containing data instances (examples) with multiple exploratory variables (features). fig_axes : tuple (default: None) A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...) names : list (default: None) A list of string names, which should have the same number of elements as there are features (columns) in X . figsize : tuple (default: (8, 8)) Height and width of the subplot grid. Ignored if fig_axes is not None . alpha : float (default: 1.0) Transparency for both the scatter plots and the histograms along the diagonal. **kwargs : kwargs Keyword arguments for the scatterplots. Returns fig_axes : tuple A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...)","title":"Scatter Plot Matrix"},{"location":"user_guide/plotting/scatterplotmatrix/#scatter-plot-matrix","text":"A function for conveniently plotting a scatter plot matrix in matplotlib.
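As a sketch combining the documented names and **kwargs parameters (not from the original page; the color keyword is assumed to be forwarded to the underlying scatter plots, as the docstring describes for **kwargs):

```python
import matplotlib.pyplot as plt
from mlxtend.data import iris_data
from mlxtend.plotting import scatterplotmatrix

X, y = iris_data()
names = ['sepal length [cm]', 'sepal width [cm]',
         'petal length [cm]', 'petal width [cm]']

# `names` labels the axes; extra keyword arguments (here `color`)
# are passed on to the underlying scatter plots
fig, axes = scatterplotmatrix(X, figsize=(10, 8), names=names,
                              alpha=0.6, color='steelblue')
plt.tight_layout()
plt.show()
```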
from mlxtend.plotting import scatterplotmatrix","title":"Scatter Plot Matrix"},{"location":"user_guide/plotting/scatterplotmatrix/#overview","text":"A matplotlib convenience function for creating a scatterplot matrix.","title":"Overview"},{"location":"user_guide/plotting/scatterplotmatrix/#references","text":"-","title":"References"},{"location":"user_guide/plotting/scatterplotmatrix/#example-1-simple-scatter-plot-matrix","text":"import matplotlib.pyplot as plt from mlxtend.data import iris_data from mlxtend.plotting import scatterplotmatrix X, y = iris_data() scatterplotmatrix(X, figsize=(10, 8)) plt.tight_layout() plt.show()","title":"Example 1 - Simple Scatter Plot Matrix"},{"location":"user_guide/plotting/scatterplotmatrix/#example-2-scatter-plot-matrix-with-multiple-categories","text":"names = ['sepal length [cm]', 'sepal width [cm]', 'petal length [cm]', 'petal width [cm]'] fig, axes = scatterplotmatrix(X[y==0], figsize=(10, 8), alpha=0.5) fig, axes = scatterplotmatrix(X[y==1], fig_axes=(fig, axes), alpha=0.5) fig, axes = scatterplotmatrix(X[y==2], fig_axes=(fig, axes), alpha=0.5, names=names) plt.tight_layout() plt.show()","title":"Example 2 - Scatter Plot Matrix with Multiple Categories"},{"location":"user_guide/plotting/scatterplotmatrix/#api","text":"scatterplotmatrix(X, fig_axes=None, names=None, figsize=(8, 8), alpha=1.0, **kwargs) Plots the lower triangular of a scatterplot matrix. Parameters X : array-like, shape={num_examples, num_features} Design matrix containing data instances (examples) with multiple exploratory variables (features). fig_axes : tuple (default: None) A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...) names : list (default: None) A list of string names, which should have the same number of elements as there are features (columns) in X . figsize : tuple (default: (8, 8)) Height and width of the subplot grid. Ignored if fig_axes is not None . alpha : float (default: 1.0) Transparency for both the scatter plots and the histograms along the diagonal. **kwargs : kwargs Keyword arguments for the scatterplots. Returns fig_axes : tuple A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...)","title":"API"},{"location":"user_guide/plotting/stacked_barplot/","text":"Stacked Barplot A function to conveniently plot stacked bar plots in matplotlib using pandas DataFrame s. from mlxtend.plotting import stacked_barplot Overview A matplotlib convenience function for creating barplots from DataFrames where each sample is associated with several categories. References - Example 1 - Stacked Barplot from Pandas DataFrames import pandas as pd s1 = [1.0, 2.0, 3.0, 4.0] s2 = [1.4, 2.1, 2.9, 5.1] s3 = [1.9, 2.2, 3.5, 4.1] s4 = [1.4, 2.5, 3.5, 4.2] data = [s1, s2, s3, s4] df = pd.DataFrame(data, columns=['X1', 'X2', 'X3', 'X4']) df.columns = ['X1', 'X2', 'X3', 'X4'] df.index = ['Sample1', 'Sample2', 'Sample3', 'Sample4'] df X1 X2 X3 X4 Sample1 1.0 2.0 3.0 4.0 Sample2 1.4 2.1 2.9 5.1 Sample3 1.9 2.2 3.5 4.1 Sample4 1.4 2.5 3.5 4.2 By default, the index of the DataFrame is used as the x-axis labels, and the DataFrame columns are used for the plot legend.
import matplotlib.pyplot as plt from mlxtend.plotting import stacked_barplot fig = stacked_barplot(df, rotation=45, legend_loc='best') API stacked_barplot(df, bar_width='auto', colors='bgrcky', labels='index', rotation=90, legend_loc='best') Function to plot stacked barplots. Parameters df : pandas.DataFrame A pandas DataFrame where the index denotes the x-axis labels, and the columns contain the different measurements for each row. bar_width: 'auto' or float (default: 'auto') Parameter to set the widths of the bars. If 'auto', the width is automatically determined by the number of columns in the dataset. colors: str (default: 'bgrcky') The colors of the bars. labels: 'index' or iterable (default: 'index') If 'index', the DataFrame index will be used as x-tick labels. rotation: int (default: 90) Parameter to rotate the x-axis labels. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False. Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/stacked_barplot/","title":"Stacked Barplot"},{"location":"user_guide/plotting/stacked_barplot/#stacked-barplot","text":"A function to conveniently plot stacked bar plots in matplotlib using pandas DataFrame s. from mlxtend.plotting import stacked_barplot","title":"Stacked Barplot"},{"location":"user_guide/plotting/stacked_barplot/#overview","text":"A matplotlib convenience function for creating barplots from DataFrames where each sample is associated with several categories.","title":"Overview"},{"location":"user_guide/plotting/stacked_barplot/#references","text":"-","title":"References"},{"location":"user_guide/plotting/stacked_barplot/#example-1-stacked-barplot-from-pandas-dataframes","text":"import pandas as pd s1 = [1.0, 2.0, 3.0, 4.0] s2 = [1.4, 2.1, 2.9, 5.1] s3 = [1.9, 2.2, 3.5, 4.1] s4 = [1.4, 2.5, 3.5, 4.2] data = [s1, s2, s3, s4] df = pd.DataFrame(data, columns=['X1', 'X2', 'X3', 'X4']) df.columns = ['X1', 'X2', 'X3', 'X4'] df.index = ['Sample1', 'Sample2', 'Sample3', 'Sample4'] df X1 X2 X3 X4 Sample1 1.0 2.0 3.0 4.0 Sample2 1.4 2.1 2.9 5.1 Sample3 1.9 2.2 3.5 4.1 Sample4 1.4 2.5 3.5 4.2 By default, the index of the DataFrame is used as the x-axis labels, and the DataFrame columns are used for the plot legend. import matplotlib.pyplot as plt from mlxtend.plotting import stacked_barplot fig = stacked_barplot(df, rotation=45, legend_loc='best')","title":"Example 1 - Stacked Barplot from Pandas DataFrames"},{"location":"user_guide/plotting/stacked_barplot/#api","text":"stacked_barplot(df, bar_width='auto', colors='bgrcky', labels='index', rotation=90, legend_loc='best') Function to plot stacked barplots. Parameters df : pandas.DataFrame A pandas DataFrame where the index denotes the x-axis labels, and the columns contain the different measurements for each row. bar_width: 'auto' or float (default: 'auto') Parameter to set the widths of the bars. If 'auto', the width is automatically determined by the number of columns in the dataset. colors: str (default: 'bgrcky') The colors of the bars. labels: 'index' or iterable (default: 'index') If 'index', the DataFrame index will be used as x-tick labels. rotation: int (default: 90) Parameter to rotate the x-axis labels.
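A short sketch of the remaining parameters (bar_width, labels, rotation), using a trimmed-down version of the Example 1 DataFrame; the custom label strings are made up for illustration:

```python
import pandas as pd
import matplotlib.pyplot as plt
from mlxtend.plotting import stacked_barplot

# Two-row version of the toy DataFrame from Example 1
df = pd.DataFrame([[1.0, 2.0, 3.0, 4.0],
                   [1.4, 2.1, 2.9, 5.1]],
                  columns=['X1', 'X2', 'X3', 'X4'],
                  index=['Sample1', 'Sample2'])

# Fixed bar width, custom x-tick labels, horizontal tick labels
fig = stacked_barplot(df, bar_width=0.5,
                      labels=['first sample', 'second sample'],
                      rotation=0, legend_loc='upper left')
plt.show()
```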
legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/stacked_barplot/","title":"API"},{"location":"user_guide/preprocessing/CopyTransformer/","text":"CopyTransformer A simple transformer that returns a copy of the input array, for example, as part of a scikit-learn pipeline. from mlxtend.preprocessing import CopyTransformer Example 1 from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier from sklearn.feature_extraction.text import CountVectorizer from mlxtend.preprocessing import CopyTransformer import numpy as np X_train = np.array(['abc def ghi', 'this is a test', 'this is a test', 'this is a test']) y_train = np.array([0, 0, 1, 1]) pipe_1 = Pipeline([ ('vect', CountVectorizer()), ('to_dense', CopyTransformer()), ('clf', RandomForestClassifier()) ]) parameters_1 = dict( clf__n_estimators=[50, 100, 200], clf__max_features=['sqrt', 'log2', None],) grid_search_1 = GridSearchCV(pipe_1, parameters_1, n_jobs=1, verbose=1, scoring='accuracy', cv=2) print(\"Performing grid search...\") print(\"pipeline:\", [name for name, _ in pipe_1.steps]) print(\"parameters:\") grid_search_1.fit(X_train, y_train) print(\"Best score: %0.3f\" % grid_search_1.best_score_) print(\"Best parameters set:\") best_parameters_1 = grid_search_1.best_estimator_.get_params() for param_name in sorted(parameters_1.keys()): print(\"\\t%s: %r\" % (param_name, best_parameters_1[param_name])) Performing grid search... pipeline: ['vect', 'to_dense', 'clf'] parameters: Fitting 2 folds for each of 9 candidates, totalling 18 fits Best score: 0.500 Best parameters set: clf__max_features: 'sqrt' clf__n_estimators: 50 [Parallel(n_jobs=1)]: Done 18 out of 18 | elapsed: 2.9s finished API CopyTransformer() Transformer that returns a copy of the input array For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/CopyTransformer/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a copy of the input array.
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array.","title":"CopyTransformer"},{"location":"user_guide/preprocessing/CopyTransformer/#copytransformer","text":"A simple transformer that returns a copy of the input array, for example, as part of a scikit-learn pipeline. from mlxtend.preprocessing import CopyTransformer","title":"CopyTransformer"},{"location":"user_guide/preprocessing/CopyTransformer/#example-1","text":"from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier from sklearn.feature_extraction.text import CountVectorizer from mlxtend.preprocessing import CopyTransformer import numpy as np X_train = np.array(['abc def ghi', 'this is a test', 'this is a test', 'this is a test']) y_train = np.array([0, 0, 1, 1]) pipe_1 = Pipeline([ ('vect', CountVectorizer()), ('to_dense', CopyTransformer()), ('clf', RandomForestClassifier()) ]) parameters_1 = dict( clf__n_estimators=[50, 100, 200], clf__max_features=['sqrt', 'log2', None],) grid_search_1 = GridSearchCV(pipe_1, parameters_1, n_jobs=1, verbose=1, scoring='accuracy', cv=2) print(\"Performing grid search...\") print(\"pipeline:\", [name for name, _ in pipe_1.steps]) print(\"parameters:\") grid_search_1.fit(X_train, y_train) print(\"Best score: %0.3f\" % grid_search_1.best_score_) print(\"Best parameters set:\") best_parameters_1 = grid_search_1.best_estimator_.get_params() for param_name in sorted(parameters_1.keys()): print(\"\\t%s: %r\" % (param_name, best_parameters_1[param_name])) Performing grid search... pipeline: ['vect', 'to_dense', 'clf'] parameters: Fitting 2 folds for each of 9 candidates, totalling 18 fits Best score: 0.500 Best parameters set: clf__max_features: 'sqrt' clf__n_estimators: 50 [Parallel(n_jobs=1)]: Done 18 out of 18 | elapsed: 2.9s finished","title":"Example 1"},{"location":"user_guide/preprocessing/CopyTransformer/#api","text":"CopyTransformer() Transformer that returns a copy of the input array For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/CopyTransformer/","title":"API"},{"location":"user_guide/preprocessing/CopyTransformer/#methods","text":"fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines).
The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array.","title":"Methods"},{"location":"user_guide/preprocessing/DenseTransformer/","text":"DenseTransformer A simple transformer that converts a sparse array into a dense NumPy array, e.g., required for scikit-learn's Pipeline when, for example, CountVectorizers are used in combination with estimators that are not compatible with sparse matrices. from mlxtend.preprocessing import DenseTransformer Example 1 from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier from sklearn.feature_extraction.text import CountVectorizer from mlxtend.preprocessing import DenseTransformer import numpy as np X_train = np.array(['abc def ghi', 'this is a test', 'this is a test', 'this is a test']) y_train = np.array([0, 0, 1, 1]) pipe_1 = Pipeline([ ('vect', CountVectorizer()), ('to_dense', DenseTransformer()), ('clf', RandomForestClassifier()) ]) parameters_1 = dict( clf__n_estimators=[50, 100, 200], clf__max_features=['sqrt', 'log2', None],) grid_search_1 = GridSearchCV(pipe_1, parameters_1, n_jobs=1, verbose=1, scoring='accuracy', cv=2) print(\"Performing grid search...\") print(\"pipeline:\", [name for name, _ in pipe_1.steps]) print(\"parameters:\") grid_search_1.fit(X_train, y_train) print(\"Best score: %0.3f\" % grid_search_1.best_score_) print(\"Best parameters set:\") best_parameters_1 = grid_search_1.best_estimator_.get_params() for param_name in sorted(parameters_1.keys()): print(\"\\t%s: %r\" % (param_name, best_parameters_1[param_name])) Performing grid search... pipeline: ['vect', 'to_dense', 'clf'] parameters: Fitting 2 folds for each of 9 candidates, totalling 18 fits Best score: 0.500 Best parameters set: clf__max_features: 'sqrt' clf__n_estimators: 50 [Parallel(n_jobs=1)]: Done 18 out of 18 | elapsed: 3.9s finished API DenseTransformer(return_copy=True) Convert a sparse array into a dense array. For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/DenseTransformer/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines).
The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array.","title":"DenseTransformer"},{"location":"user_guide/preprocessing/DenseTransformer/#densetransformer","text":"A simple transformer that converts a sparse array into a dense NumPy array, e.g., required for scikit-learn's Pipeline when, for example, CountVectorizers are used in combination with estimators that are not compatible with sparse matrices. from mlxtend.preprocessing import DenseTransformer","title":"DenseTransformer"},{"location":"user_guide/preprocessing/DenseTransformer/#example-1","text":"from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier from sklearn.feature_extraction.text import CountVectorizer from mlxtend.preprocessing import DenseTransformer import numpy as np X_train = np.array(['abc def ghi', 'this is a test', 'this is a test', 'this is a test']) y_train = np.array([0, 0, 1, 1]) pipe_1 = Pipeline([ ('vect', CountVectorizer()), ('to_dense', DenseTransformer()), ('clf', RandomForestClassifier()) ]) parameters_1 = dict( clf__n_estimators=[50, 100, 200], clf__max_features=['sqrt', 'log2', None],) grid_search_1 = GridSearchCV(pipe_1, parameters_1, n_jobs=1, verbose=1, scoring='accuracy', cv=2) print(\"Performing grid search...\") print(\"pipeline:\", [name for name, _ in pipe_1.steps]) print(\"parameters:\") grid_search_1.fit(X_train, y_train) print(\"Best score: %0.3f\" % grid_search_1.best_score_) print(\"Best parameters set:\") best_parameters_1 = grid_search_1.best_estimator_.get_params() for param_name in sorted(parameters_1.keys()): print(\"\\t%s: %r\" % (param_name, best_parameters_1[param_name])) Performing grid search... pipeline: ['vect', 'to_dense', 'clf'] parameters: Fitting 2 folds for each of 9 candidates, totalling 18 fits Best score: 0.500 Best parameters set: clf__max_features: 'sqrt' clf__n_estimators: 50 [Parallel(n_jobs=1)]: Done 18 out of 18 | elapsed: 3.9s finished","title":"Example 1"},{"location":"user_guide/preprocessing/DenseTransformer/#api","text":"DenseTransformer(return_copy=True) Convert a sparse array into a dense array. For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/DenseTransformer/","title":"API"},{"location":"user_guide/preprocessing/DenseTransformer/#methods","text":"fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array. get_params(deep=True) Get parameters for this estimator.
Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array.","title":"Methods"},{"location":"user_guide/preprocessing/MeanCenterer/","text":"Mean Centerer A transformer object that performs column-based mean centering on a NumPy array. from mlxtend.preprocessing import MeanCenterer Example 1 - Centering a NumPy Array Use the fit method to compute the column means of a dataset (e.g., the training dataset) and store them in a new MeanCenterer object. Then, call the transform method on the same dataset to center it at the sample mean. import numpy as np from mlxtend.preprocessing import MeanCenterer X_train = np.array( [[1, 2, 3], [4, 5, 6], [7, 8, 9]]) mc = MeanCenterer().fit(X_train) mc.transform(X_train) array([[-3., -3., -3.], [ 0., 0., 0.], [ 3., 3., 3.]]) API MeanCenterer() Column centering of vectors and matrices. Attributes col_means : numpy.ndarray [n_columns] NumPy array storing the mean values for centering after fitting the MeanCenterer object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/MeanCenterer/ Methods fit(X) Gets the column means for mean centering. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns self fit_transform(X) Fits and transforms an array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered. transform(X) Centers a NumPy array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered.","title":"Mean Centerer"},{"location":"user_guide/preprocessing/MeanCenterer/#mean-centerer","text":"A transformer object that performs column-based mean centering on a NumPy array. from mlxtend.preprocessing import MeanCenterer","title":"Mean Centerer"},{"location":"user_guide/preprocessing/MeanCenterer/#example-1-centering-a-numpy-array","text":"Use the fit method to compute the column means of a dataset (e.g., the training dataset) and store them in a new MeanCenterer object. Then, call the transform method on the same dataset to center it at the sample mean.
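Because fitting and transforming are separate steps, the stored column means can also be applied to new data later on; a small sketch of that pattern (the X_new values here are made up for illustration):
import numpy as np
from mlxtend.preprocessing import MeanCenterer

X_train = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
mc = MeanCenterer().fit(X_train)   # learns the column means [4., 5., 6.]

X_new = np.array([[4, 5, 6]])      # hypothetical new sample
mc.transform(X_new)                # -> array([[0., 0., 0.]])
The basic example: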
import numpy as np from mlxtend.preprocessing import MeanCenterer X_train = np.array( [[1, 2, 3], [4, 5, 6], [7, 8, 9]]) mc = MeanCenterer().fit(X_train) mc.transform(X_train) array([[-3., -3., -3.], [ 0., 0., 0.], [ 3., 3., 3.]])","title":"Example 1 - Centering a NumPy Array"},{"location":"user_guide/preprocessing/MeanCenterer/#api","text":"MeanCenterer() Column centering of vectors and matrices. Attributes col_means : numpy.ndarray [n_columns] NumPy array storing the mean values for centering after fitting the MeanCenterer object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/MeanCenterer/","title":"API"},{"location":"user_guide/preprocessing/MeanCenterer/#methods","text":"fit(X) Gets the column means for mean centering. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns self fit_transform(X) Fits and transforms an array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered. transform(X) Centers a NumPy array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered.","title":"Methods"},{"location":"user_guide/preprocessing/TransactionEncoder/","text":"TransactionEncoder Encoder class for transaction data in Python lists from mlxtend.preprocessing import TransactionEncoder Overview Encodes database transaction data in the form of a Python list of lists into a NumPy array. Example 1 Suppose we have the following transaction data: from mlxtend.preprocessing import TransactionEncoder dataset = [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] Using a TransactionEncoder object, we can transform this dataset into an array format suitable for typical machine learning APIs. Via the fit method, the TransactionEncoder learns the unique labels in the dataset, and via the transform method, it transforms the input dataset (a Python list of lists) into a one-hot encoded NumPy boolean array: te = TransactionEncoder() te_ary = te.fit(dataset).transform(dataset) te_ary array([[ True, False, True, True, False, True], [ True, False, True, False, False, True], [ True, False, True, False, False, False], [ True, True, False, False, False, False], [False, False, True, True, True, True], [False, False, True, False, True, True], [False, False, True, False, True, False], [ True, True, False, False, False, False]], dtype=bool) The NumPy array is boolean for the sake of memory efficiency when working with large datasets.
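For very large datasets, the transform step can also return a SciPy Compressed Sparse Row matrix instead of a dense array (see the sparse parameter in the API below); a minimal sketch:
te = TransactionEncoder()
te_sparse = te.fit(dataset).transform(dataset, sparse=True)   # CSR matrix
te_sparse.toarray()   # same boolean matrix, materialized as a dense array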
If a classic integer representation is desired instead, we can just convert the array to the appropriate type: te_ary.astype(\"int\") array([[1, 0, 1, 1, 0, 1], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1], [0, 0, 1, 0, 1, 1], [0, 0, 1, 0, 1, 0], [1, 1, 0, 0, 0, 0]]) After fitting, the unique column names that correspond to the data array shown above can be accessed via the columns_ attribute: te.columns_ ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] For our convenience, we can turn the encoded array into a pandas DataFrame : import pandas as pd pd.DataFrame(te_ary, columns=te.columns_) Apple Bananas Beer Chicken Milk Rice 0 True False True True False True 1 True False True False False True 2 True False True False False False 3 True True False False False False 4 False False True True True True 5 False False True False True True 6 False False True False True False 7 True True False False False False If we desire, we can turn the one-hot encoded array back into a transaction list of lists via the inverse_transform function: first4 = te_ary[:4] te.inverse_transform(first4) [['Apple', 'Beer', 'Chicken', 'Rice'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas']] API TransactionEncoder() Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/ Methods fit(X) Learn unique column names from a transaction dataset Parameters X : list of lists A Python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions. Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A Python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction.
For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A Python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. Exact representation depends on the sparse argument For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice']","title":"TransactionEncoder"},{"location":"user_guide/preprocessing/TransactionEncoder/#transactionencoder","text":"Encoder class for transaction data in Python lists from mlxtend.preprocessing import TransactionEncoder","title":"TransactionEncoder"},{"location":"user_guide/preprocessing/TransactionEncoder/#overview","text":"Encodes database transaction data in the form of a Python list of lists into a NumPy array.","title":"Overview"},{"location":"user_guide/preprocessing/TransactionEncoder/#example-1","text":"Suppose we have the following transaction data: from mlxtend.preprocessing import TransactionEncoder dataset = [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] Using a TransactionEncoder object, we can transform this dataset into an array format suitable for typical machine learning APIs.
Via the fit method, the TransactionEncoder learns the unique labels in the dataset, and via the transform method, it transforms the input dataset (a Python list of lists) into a one-hot encoded NumPy boolean array: te = TransactionEncoder() te_ary = te.fit(dataset).transform(dataset) te_ary array([[ True, False, True, True, False, True], [ True, False, True, False, False, True], [ True, False, True, False, False, False], [ True, True, False, False, False, False], [False, False, True, True, True, True], [False, False, True, False, True, True], [False, False, True, False, True, False], [ True, True, False, False, False, False]], dtype=bool) The NumPy array is boolean for the sake of memory efficiency when working with large datasets. If a classic integer representation is desired instead, we can just convert the array to the appropriate type: te_ary.astype(\"int\") array([[1, 0, 1, 1, 0, 1], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1], [0, 0, 1, 0, 1, 1], [0, 0, 1, 0, 1, 0], [1, 1, 0, 0, 0, 0]]) After fitting, the unique column names that correspond to the data array shown above can be accessed via the columns_ attribute: te.columns_ ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] For our convenience, we can turn the encoded array into a pandas DataFrame : import pandas as pd pd.DataFrame(te_ary, columns=te.columns_) Apple Bananas Beer Chicken Milk Rice 0 True False True True False True 1 True False True False False True 2 True False True False False False 3 True True False False False False 4 False False True True True True 5 False False True False True True 6 False False True False True False 7 True True False False False False If we desire, we can turn the one-hot encoded array back into a transaction list of lists via the inverse_transform function: first4 = te_ary[:4] te.inverse_transform(first4) [['Apple', 'Beer', 'Chicken', 'Rice'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas']]","title":"Example 1"},{"location":"user_guide/preprocessing/TransactionEncoder/#api","text":"TransactionEncoder() Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/","title":"API"},{"location":"user_guide/preprocessing/TransactionEncoder/#methods","text":"fit(X) Learn unique column names from a transaction dataset Parameters X : list of lists A Python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions.
Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A Python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A Python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. Exact representation depends on the sparse argument For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice']","title":"Methods"},{"location":"user_guide/preprocessing/minmax_scaling/","text":"MinMax Scaling A function for min-max scaling of pandas DataFrames or NumPy arrays. from mlxtend.preprocessing import minmax_scaling An alternative approach to Z-score normalization (or standardization) is the so-called Min-Max scaling (often also simply called \"normalization\" - a common cause for ambiguities). In this approach, the data is scaled to a fixed range - usually 0 to 1. The cost of having this bounded range - in contrast to standardization - is that we will end up with smaller standard deviations, which can suppress the effect of outliers. A Min-Max scaling is typically done via the following equation: X_{sc} = \frac{X - X_{min}}{X_{max} - X_{min}}.
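A small NumPy sketch of what this equation computes column-wise (not mlxtend's internal code, just the formula spelled out):
import numpy as np

X = np.array([[1., 10.], [2., 9.], [3., 8.]])
X_sc = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
# X_sc: array([[0. , 1. ],
#              [0.5, 0.5],
#              [1. , 0. ]])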
One family of algorithms that is scale-invariant encompasses tree-based learning algorithms. Let's take the general CART decision tree algorithm. Without going into much depth regarding information gain and impurity measures, we can think of the decision as \"is feature x_i >= some_val?\" Intuitively, we can see that it really doesn't matter on which scale this feature is (centimeters, Fahrenheit, a standardized scale -- it really doesn't matter). Some examples of algorithms where feature scaling matters are: k-nearest neighbors with a Euclidean distance measure, if we want all features to contribute equally; k-means (see k-nearest neighbors); logistic regression, SVMs, perceptrons, neural networks, etc., if you are using gradient descent/ascent-based optimization, otherwise some weights will update much faster than others; linear discriminant analysis, principal component analysis, kernel principal component analysis, since you want to find directions that maximize the variance (under the constraint that those directions/eigenvectors/principal components are orthogonal); you want to have features on the same scale since you'd emphasize variables on \"larger measurement scales\" more. There are many more cases than I can possibly list here ... I always recommend that you think about the algorithm and what it's doing; then it typically becomes obvious whether the features should be scaled or not. In addition, we'd also want to think about whether we want to \"standardize\" or \"normalize\" (here: scaling to [0, 1] range) our data. Some algorithms assume that our data is centered at 0. For example, if we initialize the weights of a small multi-layer perceptron with tanh activation units to 0 or small random values centered around zero, we want to update the model weights \"equally.\" As a rule of thumb I'd say: When in doubt, just standardize the data; it shouldn't hurt. Example 1 - Scaling a Pandas DataFrame import pandas as pd s1 = pd.Series([1, 2, 3, 4, 5, 6], index=(range(6))) s2 = pd.Series([10, 9, 8, 7, 6, 5], index=(range(6))) df = pd.DataFrame(s1, columns=['s1']) df['s2'] = s2 df s1 s2 0 1 10 1 2 9 2 3 8 3 4 7 4 5 6 5 6 5 from mlxtend.preprocessing import minmax_scaling minmax_scaling(df, columns=['s1', 's2']) s1 s2 0 0.0 1.0 1 0.2 0.8 2 0.4 0.6 3 0.6 0.4 4 0.8 0.2 5 1.0 0.0 Example 2 - Scaling a NumPy Array import numpy as np X = np.array([[1, 10], [2, 9], [3, 8], [4, 7], [5, 6], [6, 5]]) X array([[ 1, 10], [ 2, 9], [ 3, 8], [ 4, 7], [ 5, 6], [ 6, 5]]) from mlxtend.preprocessing import minmax_scaling minmax_scaling(X, columns=[0, 1]) array([[ 0. , 1. ], [ 0.2, 0.8], [ 0.4, 0.6], [ 0.6, 0.4], [ 0.8, 0.2], [ 1. , 0. ]]) API minmax_scaling(array, columns, min_val=0, max_val=1) Min-max scaling of pandas DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] min_val : int or float , optional (default= 0 ) minimum value after rescaling. max_val : int or float , optional (default= 1 ) maximum value after rescaling. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with rescaled columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/minmax_scaling/","title":"MinMax Scaling"},{"location":"user_guide/preprocessing/minmax_scaling/#minmax-scaling","text":"A function for min-max scaling of pandas DataFrames or NumPy arrays.
from mlxtend.preprocessing import minmax_scaling An alternative approach to Z-score normalization (or standardization) is the so-called Min-Max scaling (often also simply called \"normalization\" - a common cause for ambiguities). In this approach, the data is scaled to a fixed range - usually 0 to 1. The cost of having this bounded range - in contrast to standardization - is that we will end up with smaller standard deviations, which can suppress the effect of outliers. A Min-Max scaling is typically done via the following equation: X_{sc} = \frac{X - X_{min}}{X_{max} - X_{min}}. One family of algorithms that is scale-invariant encompasses tree-based learning algorithms. Let's take the general CART decision tree algorithm. Without going into much depth regarding information gain and impurity measures, we can think of the decision as \"is feature x_i >= some_val?\" Intuitively, we can see that it really doesn't matter on which scale this feature is (centimeters, Fahrenheit, a standardized scale -- it really doesn't matter). Some examples of algorithms where feature scaling matters are: k-nearest neighbors with a Euclidean distance measure, if we want all features to contribute equally; k-means (see k-nearest neighbors); logistic regression, SVMs, perceptrons, neural networks, etc., if you are using gradient descent/ascent-based optimization, otherwise some weights will update much faster than others; linear discriminant analysis, principal component analysis, kernel principal component analysis, since you want to find directions that maximize the variance (under the constraint that those directions/eigenvectors/principal components are orthogonal); you want to have features on the same scale since you'd emphasize variables on \"larger measurement scales\" more. There are many more cases than I can possibly list here ... I always recommend that you think about the algorithm and what it's doing; then it typically becomes obvious whether the features should be scaled or not. In addition, we'd also want to think about whether we want to \"standardize\" or \"normalize\" (here: scaling to [0, 1] range) our data. Some algorithms assume that our data is centered at 0. For example, if we initialize the weights of a small multi-layer perceptron with tanh activation units to 0 or small random values centered around zero, we want to update the model weights \"equally.\" As a rule of thumb I'd say: When in doubt, just standardize the data; it shouldn't hurt.","title":"MinMax Scaling"},{"location":"user_guide/preprocessing/minmax_scaling/#example-1-scaling-a-pandas-dataframe","text":"import pandas as pd s1 = pd.Series([1, 2, 3, 4, 5, 6], index=(range(6))) s2 = pd.Series([10, 9, 8, 7, 6, 5], index=(range(6))) df = pd.DataFrame(s1, columns=['s1']) df['s2'] = s2 df s1 s2 0 1 10 1 2 9 2 3 8 3 4 7 4 5 6 5 6 5 from mlxtend.preprocessing import minmax_scaling minmax_scaling(df, columns=['s1', 's2']) s1 s2 0 0.0 1.0 1 0.2 0.8 2 0.4 0.6 3 0.6 0.4 4 0.8 0.2 5 1.0 0.0","title":"Example 1 - Scaling a Pandas DataFrame"},{"location":"user_guide/preprocessing/minmax_scaling/#example-2-scaling-a-numpy-array","text":"import numpy as np X = np.array([[1, 10], [2, 9], [3, 8], [4, 7], [5, 6], [6, 5]]) X array([[ 1, 10], [ 2, 9], [ 3, 8], [ 4, 7], [ 5, 6], [ 6, 5]]) from mlxtend.preprocessing import minmax_scaling minmax_scaling(X, columns=[0, 1]) array([[ 0. , 1. ], [ 0.2, 0.8], [ 0.4, 0.6], [ 0.6, 0.4], [ 0.8, 0.2], [ 1. , 0.
]])","title":"Example 2 - Scaling a NumPy Array"},{"location":"user_guide/preprocessing/minmax_scaling/#api","text":"minmax_scaling(array, columns, min_val=0, max_val=1) Min-max scaling of pandas DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] min_val : int or float , optional (default= 0 ) minimum value after rescaling. max_val : int or float , optional (default= 1 ) maximum value after rescaling. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with rescaled columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/minmax_scaling/","title":"API"},{"location":"user_guide/preprocessing/one-hot_encoding/","text":"One-Hot Encoding A function that performs one-hot encoding for class labels. from mlxtend.preprocessing import one_hot Overview Typical supervised machine learning algorithms for classification assume that the class labels are nominal (a special case of categorical where no order is implied). A typical example of a nominal feature would be \"color\" since we can't say (in most applications) that \"orange > blue > red\". The one_hot function provides a simple interface to convert class label integers into a so-called one-hot array, where each unique label is represented as a column in the new array. For example, let's assume we have 5 data points from 3 different classes: 0, 1, and 2. y = [0, # sample 1, class 0 1, # sample 2, class 1 0, # sample 3, class 0 2, # sample 4, class 2 2] # sample 5, class 2 After one-hot encoding, we then obtain the following array (note that the index position of the \"1\" in each row denotes the class label of this sample): y = [[1, 0, 0], # sample 1, class 0 [0, 1, 0], # sample 2, class 1 [1, 0, 0], # sample 3, class 0 [0, 0, 1], # sample 4, class 2 [0, 0, 1] # sample 5, class 2 ]) Example 1 - Defaults from mlxtend.preprocessing import one_hot import numpy as np y = np.array([0, 1, 2, 1, 2]) one_hot(y) array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.], [ 0., 1., 0.], [ 0., 0., 1.]]) Example 2 - Python Lists from mlxtend.preprocessing import one_hot y = [0, 1, 2, 1, 2] one_hot(y) array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.], [ 0., 1., 0.], [ 0., 0., 1.]]) Example 3 - Integer Arrays from mlxtend.preprocessing import one_hot y = [0, 1, 2, 1, 2] one_hot(y, dtype='int') array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]]) Example 4 - Arbitrary Numbers of Class Labels from mlxtend.preprocessing import one_hot y = [0, 1, 2, 1, 2] one_hot(y, num_labels=10) array([[ 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.], [ 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.]]) API one_hot(y, num_labels='auto', dtype='float') One-hot encoding of class labels Parameters y : array-like, shape = [n_classlabels] Python list or numpy array consisting of class labels. num_labels : int or 'auto' Number of unique labels in the class label array. Infers the number of unique labels from the input array if set to 'auto'. dtype : str NumPy array type (float, float32, float64) of the output array. Returns ary : numpy.ndarray, shape = [n_classlabels] One-hot encoded array, where each sample is represented as a row vector in the returned array.
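Since the position of the 1 in each row encodes the class label, the integer labels can be recovered with a plain NumPy argmax; a small sketch (this inverse step is not part of one_hot itself):
import numpy as np
from mlxtend.preprocessing import one_hot

y_enc = one_hot([0, 1, 2, 1, 2])
np.argmax(y_enc, axis=1)   # -> array([0, 1, 2, 1, 2])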
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/one_hot/","title":"One hot encoding"},{"location":"user_guide/preprocessing/one-hot_encoding/#one-hot-encoding","text":"A function that performs one-hot encoding for class labels. from mlxtend.preprocessing import one_hot","title":"One-Hot Encoding"},{"location":"user_guide/preprocessing/one-hot_encoding/#overview","text":"Typical supervised machine learning algorithms for classification assume that the class labels are nominal (a special case of categorical where no order is implied). A typical example of a nominal feature would be \"color\" since we can't say (in most applications) that \"orange > blue > red\". The one_hot function provides a simple interface to convert class label integers into a so-called one-hot array, where each unique label is represented as a column in the new array. For example, let's assume we have 5 data points from 3 different classes: 0, 1, and 2. y = [0, # sample 1, class 0 1, # sample 2, class 1 0, # sample 3, class 0 2, # sample 4, class 2 2] # sample 5, class 2 After one-hot encoding, we then obtain the following array (note that the index position of the \"1\" in each row denotes the class label of this sample): y = [[1, 0, 0], # sample 1, class 0 [0, 1, 0], # sample 2, class 1 [1, 0, 0], # sample 3, class 0 [0, 0, 1], # sample 4, class 2 [0, 0, 1] # sample 5, class 2 ])","title":"Overview"},{"location":"user_guide/preprocessing/one-hot_encoding/#example-1-defaults","text":"from mlxtend.preprocessing import one_hot import numpy as np y = np.array([0, 1, 2, 1, 2]) one_hot(y) array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.], [ 0., 1., 0.], [ 0., 0., 1.]])","title":"Example 1 - Defaults"},{"location":"user_guide/preprocessing/one-hot_encoding/#example-2-python-lists","text":"from mlxtend.preprocessing import one_hot y = [0, 1, 2, 1, 2] one_hot(y) array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.], [ 0., 1., 0.], [ 0., 0., 1.]])","title":"Example 2 - Python Lists"},{"location":"user_guide/preprocessing/one-hot_encoding/#example-3-integer-arrays","text":"from mlxtend.preprocessing import one_hot y = [0, 1, 2, 1, 2] one_hot(y, dtype='int') array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]])","title":"Example 3 - Integer Arrays"},{"location":"user_guide/preprocessing/one-hot_encoding/#example-4-arbitrary-numbers-of-class-labels","text":"from mlxtend.preprocessing import one_hot y = [0, 1, 2, 1, 2] one_hot(y, num_labels=10) array([[ 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.], [ 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.]])","title":"Example 4 - Arbitrary Numbers of Class Labels"},{"location":"user_guide/preprocessing/one-hot_encoding/#api","text":"one_hot(y, num_labels='auto', dtype='float') One-hot encoding of class labels Parameters y : array-like, shape = [n_classlabels] Python list or numpy array consisting of class labels. num_labels : int or 'auto' Number of unique labels in the class label array. Infers the number of unique labels from the input array if set to 'auto'. dtype : str NumPy array type (float, float32, float64) of the output array. Returns ary : numpy.ndarray, shape = [n_classlabels] One-hot encoded array, where each sample is represented as a row vector in the returned array.
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/one_hot/","title":"API"},{"location":"user_guide/preprocessing/shuffle_arrays_unison/","text":"Shuffle Arrays in Unison A function for shuffling NumPy arrays in unison. from mlxtend.preprocessing import shuffle_arrays_unison Example 1 - Shuffling NumPy Arrays in Unison import numpy as np from mlxtend.preprocessing import shuffle_arrays_unison X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) y = np.array([1, 2, 3]) print('X:\\n%s' % X) print('y:\\n%s' % y) X: [[1 2 3] [4 5 6] [7 8 9]] y: [1 2 3] X2, y2 = shuffle_arrays_unison(arrays=[X, y], random_seed=3) print('X2:\\n%s' % X2) print('y2:\\n%s' % y2) X2: [[4 5 6] [1 2 3] [7 8 9]] y2: [2 1 3] API shuffle_arrays_unison(arrays, random_seed=None) Shuffle NumPy arrays in unison. Parameters arrays : array-like, shape = [n_arrays] A list of NumPy arrays. random_seed : int (default: None) Sets the random state. Returns shuffled_arrays : A list of NumPy arrays after shuffling. Examples >>> import numpy as np >>> from mlxtend.preprocessing import shuffle_arrays_unison >>> X1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> y1 = np.array([1, 2, 3]) >>> X2, y2 = shuffle_arrays_unison(arrays=[X1, y1], random_seed=3) >>> assert(X2.all() == np.array([[4, 5, 6], [1, 2, 3], [7, 8, 9]]).all()) >>> assert(y2.all() == np.array([2, 1, 3]).all()) >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/shuffle_arrays_unison/","title":"Shuffle Arrays in Unison"},{"location":"user_guide/preprocessing/shuffle_arrays_unison/#shuffle-arrays-in-unison","text":"A function for shuffling NumPy arrays in unison. from mlxtend.preprocessing import shuffle_arrays_unison","title":"Shuffle Arrays in Unison"},{"location":"user_guide/preprocessing/shuffle_arrays_unison/#example-1-scaling-a-pandas-dataframe","text":"import numpy as np from mlxtend.preprocessing import shuffle_arrays_unison X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) y = np.array([1, 2, 3]) print('X:\\n%s' % X) print('y:\\n%s' % y) X: [[1 2 3] [4 5 6] [7 8 9]] y: [1 2 3] X2, y2 = shuffle_arrays_unison(arrays=[X, y], random_seed=3) print('X2:\\n%s' % X2) print('y2:\\n%s' % y2) X2: [[4 5 6] [1 2 3] [7 8 9]] y2: [2 1 3]","title":"Example 1 - Shuffling NumPy Arrays in Unison"},{"location":"user_guide/preprocessing/shuffle_arrays_unison/#api","text":"shuffle_arrays_unison(arrays, random_seed=None) Shuffle NumPy arrays in unison. Parameters arrays : array-like, shape = [n_arrays] A list of NumPy arrays. random_seed : int (default: None) Sets the random state. Returns shuffled_arrays : A list of NumPy arrays after shuffling. Examples >>> import numpy as np >>> from mlxtend.preprocessing import shuffle_arrays_unison >>> X1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> y1 = np.array([1, 2, 3]) >>> X2, y2 = shuffle_arrays_unison(arrays=[X1, y1], random_seed=3) >>> assert(X2.all() == np.array([[4, 5, 6], [1, 2, 3], [7, 8, 9]]).all()) >>> assert(y2.all() == np.array([2, 1, 3]).all()) >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/shuffle_arrays_unison/","title":"API"},{"location":"user_guide/preprocessing/standardize/","text":"Standardize A function that performs column-based standardization on a NumPy array. from mlxtend.preprocessing import standardize Overview The result of standardization (or Z-score normalization) is that the features will be rescaled so that they'll have the properties of a standard normal distribution with \mu = 0 and \sigma = 1 ,
where \\mu is the mean (average) and \\sigma is the standard deviation from the mean; standard scores (also called z scores) of the samples are calculated as z=\\frac{x-\\mu}{\\sigma}. Standardizing the features so that they are centered around 0 with a standard deviation of 1 is not only important if we are comparing measurements that have different units, but it is also a general requirement for the optimal performance of many machine learning algorithms. One family of algorithms that is scale-invariant encompasses tree-based learning algorithms. Let's take the general CART decision tree algorithm. Without going into much depth regarding information gain and impurity measures, we can think of the decision as \"is feature x_i >= some_val?\" Intuitively, we can see that it really doesn't matter on which scale this feature is (centimeters, Fahrenheit, a standardized scale -- it really doesn't matter). Some examples of algorithms where feature scaling matters are: k-nearest neighbors with an Euclidean distance measure if want all features to contribute equally k-means (see k-nearest neighbors) logistic regression, SVMs, perceptrons, neural networks etc. if you are using gradient descent/ascent-based optimization, otherwise some weights will update much faster than others linear discriminant analysis, principal component analysis, kernel principal component analysis since you want to find directions of maximizing the variance (under the constraints that those directions/eigenvectors/principal components are orthogonal); you want to have features on the same scale since you'd emphasize variables on \"larger measurement scales\" more. There are many more cases than I can possibly list here ... I always recommend you to think about the algorithm and what it's doing, and then it typically becomes obvious whether we want to scale your features or not. In addition, we'd also want to think about whether we want to \"standardize\" or \"normalize\" (here: scaling to [0, 1] range) our data. Some algorithms assume that our data is centered at 0. For example, if we initialize the weights of a small multi-layer perceptron with tanh activation units to 0 or small random values centered around zero, we want to update the model weights \"equally.\" As a rule of thumb I'd say: When in doubt, just standardize the data, it shouldn't hurt. Example 1 - Standardize a Pandas DataFrame import pandas as pd s1 = pd.Series([1, 2, 3, 4, 5, 6], index=(range(6))) s2 = pd.Series([10, 9, 8, 7, 6, 5], index=(range(6))) df = pd.DataFrame(s1, columns=['s1']) df['s2'] = s2 df s1 s2 0 1 10 1 2 9 2 3 8 3 4 7 4 5 6 5 6 5 from mlxtend.preprocessing import standardize standardize(df, columns=['s1', 's2']) s1 s2 0 -1.46385 1.46385 1 -0.87831 0.87831 2 -0.29277 0.29277 3 0.29277 -0.29277 4 0.87831 -0.87831 5 1.46385 -1.46385 Example 2 - Standardize a NumPy Array import numpy as np X = np.array([[1, 10], [2, 9], [3, 8], [4, 7], [5, 6], [6, 5]]) X array([[ 1, 10], [ 2, 9], [ 3, 8], [ 4, 7], [ 5, 6], [ 6, 5]]) from mlxtend.preprocessing import standardize standardize(X, columns=[0, 1]) array([[-1.46385011, 1.46385011], [-0.87831007, 0.87831007], [-0.29277002, 0.29277002], [ 0.29277002, -0.29277002], [ 0.87831007, -0.87831007], [ 1.46385011, -1.46385011]]) Example 3 - Re-using parameters In machine learning contexts, it is desired to re-use the parameters that have been obtained from a training set to scale new, future data (including the independent test set). 
By setting return_params=True , the standardize function returns a second object, a parameter dictionary containing the column means and standard deviations that can be re-used by feeding it to the params parameter upon function call. import numpy as np from mlxtend.preprocessing import standardize X_train = np.array([[1, 10], [4, 7], [3, 8]]) X_test = np.array([[1, 2], [3, 4], [5, 6]]) X_train_std, params = standardize(X_train, columns=[0, 1], return_params=True) X_train_std array([[-1.33630621, 1.33630621], [ 1.06904497, -1.06904497], [ 0.26726124, -0.26726124]]) params {'avgs': array([ 2.66666667, 8.33333333]), 'stds': array([ 1.24721913, 1.24721913])} X_test_std = standardize(X_test, columns=[0, 1], params=params) X_test_std array([[-1.33630621, -5.0779636 ], [ 0.26726124, -3.47439614], [ 1.87082869, -1.87082869]]) API standardize(array, columns=None, ddof=0, return_params=False, params=None) Standardize columns in pandas DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] (default: None) Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] If None, standardizes all columns. ddof : int (default: 0) Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. return_params : bool (default: False) If set to True, a dictionary is returned in addition to the standardized array. The parameter dictionary contains the column means ('avgs') and standard deviations ('stds') of the individual columns. params : dict (default: None) A dictionary with column means and standard deviations as returned by the standardize function if return_params was set to True. If a params dictionary is provided, the standardize function will use these instead of computing them from the current array. Notes If all values in a given column are the same, these values are all set to 0.0 . The standard deviation in the parameters dictionary is consequently set to 1.0 to avoid dividing by zero. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with standardized columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/standardize/","title":"Standardize"},{"location":"user_guide/preprocessing/standardize/#standardize","text":"A function that performs column-based standardization on a NumPy array. from mlxtend.preprocessing import standardize","title":"Standardize"},{"location":"user_guide/preprocessing/standardize/#overview","text":"The result of standardization (or Z-score normalization) is that the features will be rescaled so that they'll have the properties of a standard normal distribution with \mu = 0 and \sigma = 1 , where \mu is the mean (average) and \sigma is the standard deviation from the mean; standard scores (also called z scores) of the samples are calculated as z=\frac{x-\mu}{\sigma}. Standardizing the features so that they are centered around 0 with a standard deviation of 1 is not only important if we are comparing measurements that have different units, but it is also a general requirement for the optimal performance of many machine learning algorithms. One family of algorithms that is scale-invariant encompasses tree-based learning algorithms. Let's take the general CART decision tree algorithm.
{"location":"user_guide/preprocessing/standardize/#standardize","text":"A function that performs column-based standardization on a NumPy array. from mlxtend.preprocessing import standardize","title":"Standardize"},{"location":"user_guide/preprocessing/standardize/#overview","text":"The result of standardization (or Z-score normalization) is that the features will be rescaled so that they'll have the properties of a standard normal distribution with \\mu = 0 and \\sigma = 1 . where \\mu is the mean (average) and \\sigma is the standard deviation from the mean; standard scores (also called z scores) of the samples are calculated as z=\\frac{x-\\mu}{\\sigma}. Standardizing the features so that they are centered around 0 with a standard deviation of 1 is not only important if we are comparing measurements that have different units, but it is also a general requirement for the optimal performance of many machine learning algorithms. One family of algorithms that is scale-invariant encompasses tree-based learning algorithms. Let's take the general CART decision tree algorithm. Without going into much depth regarding information gain and impurity measures, we can think of the decision as \"is feature x_i >= some_val?\" Intuitively, we can see that it really doesn't matter on which scale this feature is measured (centimeters, Fahrenheit, or a standardized scale). Some examples of algorithms where feature scaling matters are: k-nearest neighbors with a Euclidean distance measure if you want all features to contribute equally k-means (see k-nearest neighbors) logistic regression, SVMs, perceptrons, neural networks etc. if you are using gradient descent/ascent-based optimization, otherwise some weights will update much faster than others linear discriminant analysis, principal component analysis, kernel principal component analysis since you want to find directions of maximal variance (under the constraint that those directions/eigenvectors/principal components are orthogonal); you want to have features on the same scale since you'd otherwise emphasize variables on \"larger measurement scales\" more. There are many more cases than I can possibly list here ... I always recommend thinking about the algorithm and what it's doing; then it typically becomes obvious whether you want to scale your features or not. In addition, we'd also want to think about whether we want to \"standardize\" or \"normalize\" (here: scaling to the [0, 1] range) our data. Some algorithms assume that our data is centered at 0. For example, if we initialize the weights of a small multi-layer perceptron with tanh activation units to 0 or small random values centered around zero, we want to update the model weights \"equally.\" As a rule of thumb I'd say: When in doubt, just standardize the data; it shouldn't hurt.","title":"Overview"},
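For the \"normalize\" case mentioned in the overview above, mlxtend provides a separate minmax_scaling function (see the mlxtend.preprocessing user guide); the short contrast below assumes its documented signature with a columns argument analogous to standardize:

```python
import pandas as pd
from mlxtend.preprocessing import standardize, minmax_scaling

df = pd.DataFrame({'s1': [1, 2, 3, 4, 5, 6]})

print(standardize(df, columns=['s1']))     # centered at 0 with unit variance
print(minmax_scaling(df, columns=['s1']))  # rescaled to the [0, 1] range
```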
{"location":"user_guide/preprocessing/standardize/#example-1-standardize-a-pandas-dataframe","text":"import pandas as pd s1 = pd.Series([1, 2, 3, 4, 5, 6], index=(range(6))) s2 = pd.Series([10, 9, 8, 7, 6, 5], index=(range(6))) df = pd.DataFrame(s1, columns=['s1']) df['s2'] = s2 df s1 s2 0 1 10 1 2 9 2 3 8 3 4 7 4 5 6 5 6 5 from mlxtend.preprocessing import standardize standardize(df, columns=['s1', 's2']) s1 s2 0 -1.46385 1.46385 1 -0.87831 0.87831 2 -0.29277 0.29277 3 0.29277 -0.29277 4 0.87831 -0.87831 5 1.46385 -1.46385","title":"Example 1 - Standardize a Pandas DataFrame"},{"location":"user_guide/preprocessing/standardize/#example-2-standardize-a-numpy-array","text":"import numpy as np X = np.array([[1, 10], [2, 9], [3, 8], [4, 7], [5, 6], [6, 5]]) X array([[ 1, 10], [ 2, 9], [ 3, 8], [ 4, 7], [ 5, 6], [ 6, 5]]) from mlxtend.preprocessing import standardize standardize(X, columns=[0, 1]) array([[-1.46385011, 1.46385011], [-0.87831007, 0.87831007], [-0.29277002, 0.29277002], [ 0.29277002, -0.29277002], [ 0.87831007, -0.87831007], [ 1.46385011, -1.46385011]])","title":"Example 2 - Standardize a NumPy Array"},{"location":"user_guide/preprocessing/standardize/#example-3-re-using-parameters","text":"In machine learning contexts, it is often desirable to re-use the parameters that have been obtained from a training set to scale new, future data (including the independent test set). By setting return_params=True , the standardize function returns a second object, a parameter dictionary containing the column means and standard deviations that can be re-used by passing it to the params parameter in a later function call. import numpy as np from mlxtend.preprocessing import standardize X_train = np.array([[1, 10], [4, 7], [3, 8]]) X_test = np.array([[1, 2], [3, 4], [5, 6]]) X_train_std, params = standardize(X_train, columns=[0, 1], return_params=True) X_train_std array([[-1.33630621, 1.33630621], [ 1.06904497, -1.06904497], [ 0.26726124, -0.26726124]]) params {'avgs': array([ 2.66666667, 8.33333333]), 'stds': array([ 1.24721913, 1.24721913])} X_test_std = standardize(X_test, columns=[0, 1], params=params) X_test_std array([[-1.33630621, -5.0779636 ], [ 0.26726124, -3.47439614], [ 1.87082869, -1.87082869]])","title":"Example 3 - Re-using parameters"},{"location":"user_guide/preprocessing/standardize/#api","text":"standardize(array, columns=None, ddof=0, return_params=False, params=None) Standardize columns in pandas DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] (default: None) Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] If None, standardizes all columns. ddof : int (default: 0) Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. return_params : bool (default: False) If set to True, a dictionary is returned in addition to the standardized array. The parameter dictionary contains the column means ('avgs') and standard deviations ('stds') of the individual columns. params : dict (default: None) A dictionary with column means and standard deviations as returned by the standardize function if return_params was set to True. If a params dictionary is provided, the standardize function will use these instead of computing them from the current array. Notes If all values in a given column are the same, these values are all set to 0.0 . The standard deviation in the parameters dictionary is consequently set to 1.0 to avoid dividing by zero. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with standardized columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/standardize/","title":"API"},{"location":"user_guide/regressor/LinearRegression/","text":"LinearRegression An implementation of Ordinary Least Squares simple and multiple linear regression. from mlxtend.regressor import LinearRegression Overview Illustration of a simple linear regression model: In Ordinary Least Squares (OLS) Linear Regression, our goal is to find the line (or hyperplane) that minimizes the vertical offsets. Or in other words, we define the best-fitting line as the line that minimizes the sum of squared errors (SSE) or mean squared error (MSE) between our target variable (y) and our predicted output over all samples i in our dataset of size n . SSE = \\sum_i \\big(\\text{target}^{(i)} - \\text{output}^{(i)}\\big)^2 MSE = \\frac{1}{n} \\times SSE Now, LinearRegression implements a linear regression model for performing ordinary least squares regression using one of the following three approaches: Normal Equations Gradient Descent Stochastic Gradient Descent
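Which of these three approaches is used is selected via the minibatches constructor argument, as documented in the API section further below; in sketch form:

```python
from mlxtend.regressor import LinearRegression

y = [1.0, 2.0, 3.0, 4.0, 5.0]  # placeholder target values, only needed for len(y)

LinearRegression(minibatches=None)    # normal equations (closed-form solution)
LinearRegression(minibatches=1)       # (batch) gradient descent
LinearRegression(minibatches=len(y))  # stochastic gradient descent
LinearRegression(minibatches=3)       # minibatch learning with 3 minibatches
```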
Normal Equations (closed-form solution) The closed-form solution should be preferred for \"smaller\" datasets where calculating the (\"costly\") matrix inverse is not a concern. For very large datasets, or datasets where the inverse of [X^T X] may not exist (the matrix is non-invertible or singular, e.g., in case of perfect multicollinearity), the gradient descent or stochastic gradient descent approaches are to be preferred. The linear function (linear regression model) is defined as: y = w_0x_0 + w_1x_1 + ... + w_mx_m = \\sum_{j=0}^{m} w_jx_j = \\mathbf{w}^T\\mathbf{x} where y is the response variable, \\mathbf{x} is an m -dimensional sample vector, and \\mathbf{w} is the weight vector (vector of coefficients). Note that w_0 represents the y-axis intercept of the model and therefore x_0=1 . Using the closed-form solution (normal equation), we compute the weights of the model as follows: \\mathbf{w} = (\\mathbf{X}^T\\mathbf{X})^{-1}\\mathbf{X}^Ty Gradient Descent (GD) and Stochastic Gradient Descent (SGD) See Gradient Descent and Stochastic Gradient Descent and Deriving the Gradient Descent Rule for Linear Regression and Adaline for details. Random shuffling is implemented as: for one or more epochs randomly shuffle samples in the training set for training sample i compute gradients and perform weight updates References F. Galton. Regression towards mediocrity in hereditary stature . Journal of the Anthropological Institute of Great Britain and Ireland, pages 246\u2013263, 1886. A. I. Khuri. Introduction to linear regression analysis , by Douglas C. Montgomery, Elizabeth A. Peck, G. Geoffrey Vining. International Statistical Review, 81(2):318\u2013319, 2013. D. S. G. Pollock. The Classical Linear Regression Model . Example 1 - Closed Form Solution import numpy as np import matplotlib.pyplot as plt from mlxtend.regressor import LinearRegression X = np.array([ 1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis] y = np.array([ 1.0, 2.0, 3.0, 4.0, 5.0]) ne_lr = LinearRegression(minibatches=None) ne_lr.fit(X, y) print('Intercept: %.2f' % ne_lr.b_) print('Slope: %.2f' % ne_lr.w_[0]) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') plt.plot(X, model.predict(X), color='red') return lin_regplot(X, y, ne_lr) plt.show() Intercept: 0.25 Slope: 0.81 Example 2 - Gradient Descent import numpy as np import matplotlib.pyplot as plt from mlxtend.regressor import LinearRegression X = np.array([ 1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis] y = np.array([ 1.0, 2.0, 3.0, 4.0, 5.0]) gd_lr = LinearRegression(eta=0.005, epochs=100, minibatches=1, random_seed=123, print_progress=3) gd_lr.fit(X, y) print('Intercept: %.2f' % gd_lr.b_) print('Slope: %.2f' % gd_lr.w_) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') plt.plot(X, model.predict(X), color='red') return lin_regplot(X, y, gd_lr) plt.show() Iteration: 100/100 | Cost 0.08 | Elapsed: 0:00:00 | ETA: 0:00:00 Intercept: 0.22 Slope: 0.82 # Visualizing the cost to check for convergence and plotting the linear model: plt.plot(range(1, gd_lr.epochs+1), gd_lr.cost_) plt.xlabel('Epochs') plt.ylabel('Cost') plt.ylim([0, 0.2]) plt.tight_layout() plt.show() Example 3 - Stochastic Gradient Descent import numpy as np import matplotlib.pyplot as plt from mlxtend.regressor import LinearRegression X = np.array([ 1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis] y = np.array([ 1.0, 2.0, 3.0, 4.0, 5.0]) sgd_lr = LinearRegression(eta=0.01, epochs=100, random_seed=0, minibatches=len(y)) sgd_lr.fit(X, y) print('Intercept: %.2f' % sgd_lr.b_) print('Slope: %.2f' % sgd_lr.w_) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') plt.plot(X, model.predict(X), color='red') return lin_regplot(X, y, sgd_lr) plt.show() Intercept: 0.24 Slope: 0.82 plt.plot(range(1, sgd_lr.epochs+1), sgd_lr.cost_) plt.xlabel('Epochs') plt.ylabel('Cost') plt.ylim([0, 0.2]) plt.tight_layout() plt.show()
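Before moving on to minibatches, the closed-form fit from Example 1 can be double-checked with a few lines of NumPy that evaluate the normal equation given earlier; np.linalg.pinv is used in this sketch so that it also works if X^T X happens to be singular:

```python
import numpy as np

X = np.array([1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis]
y = np.array([1.0, 2.0, 3.0, 4.0, 5.0])

Xb = np.hstack([np.ones((X.shape[0], 1)), X])   # prepend x_0 = 1 for the intercept
w = np.linalg.pinv(Xb.T @ Xb) @ Xb.T @ y        # w = (X^T X)^{-1} X^T y
print('Intercept: %.2f, Slope: %.2f' % (w[0], w[1]))  # approx. 0.25 and 0.81
```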
Example 3 - Stochastic Gradient Descent with Minibatches import numpy as np import matplotlib.pyplot as plt from mlxtend.regressor import LinearRegression X = np.array([ 1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis] y = np.array([ 1.0, 2.0, 3.0, 4.0, 5.0]) sgd_lr = LinearRegression(eta=0.01, epochs=100, random_seed=0, minibatches=3) sgd_lr.fit(X, y) print('Intercept: %.2f' % sgd_lr.b_) print('Slope: %.2f' % sgd_lr.w_) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') plt.plot(X, model.predict(X), color='red') return lin_regplot(X, y, sgd_lr) plt.show() Intercept: 0.24 Slope: 0.82 plt.plot(range(1, sgd_lr.epochs+1), sgd_lr.cost_) plt.xlabel('Epochs') plt.ylabel('Cost') plt.ylim([0, 0.2]) plt.tight_layout() plt.show() API LinearRegression(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) Ordinary least squares linear regression. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent learning If 1 < minibatches < len(y): Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if the closed-form (normal equation) solver is not used 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch; ignored if the closed-form (normal equation) solver is used Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/LinearRegression/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values.","title":"LinearRegression"},
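One detail from the fit method above worth highlighting: with init_params=False, training can be resumed from the current weights, for example to run additional epochs after inspecting the cost curve. A minimal sketch, reusing the gradient descent setup from Example 2:

```python
import numpy as np
from mlxtend.regressor import LinearRegression

X = np.array([1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis]
y = np.array([1.0, 2.0, 3.0, 4.0, 5.0])

gd_lr = LinearRegression(eta=0.005, epochs=100, minibatches=1, random_seed=123)
gd_lr.fit(X, y)                      # first 100 epochs
gd_lr.fit(X, y, init_params=False)   # 100 more epochs, continuing from the current weights
```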
{"location":"user_guide/regressor/LinearRegression/#linearregression","text":"An implementation of Ordinary Least Squares simple and multiple linear regression. from mlxtend.regressor import LinearRegression","title":"LinearRegression"},{"location":"user_guide/regressor/LinearRegression/#overview","text":"Illustration of a simple linear regression model: In Ordinary Least Squares (OLS) Linear Regression, our goal is to find the line (or hyperplane) that minimizes the vertical offsets. Or in other words, we define the best-fitting line as the line that minimizes the sum of squared errors (SSE) or mean squared error (MSE) between our target variable (y) and our predicted output over all samples i in our dataset of size n . SSE = \\sum_i \\big(\\text{target}^{(i)} - \\text{output}^{(i)}\\big)^2 MSE = \\frac{1}{n} \\times SSE Now, LinearRegression implements a linear regression model for performing ordinary least squares regression using one of the following three approaches: Normal Equations Gradient Descent Stochastic Gradient Descent","title":"Overview"},{"location":"user_guide/regressor/LinearRegression/#normal-equations-closed-form-solution","text":"The closed-form solution should be preferred for \"smaller\" datasets where calculating the (\"costly\") matrix inverse is not a concern. For very large datasets, or datasets where the inverse of [X^T X] may not exist (the matrix is non-invertible or singular, e.g., in case of perfect multicollinearity), the gradient descent or stochastic gradient descent approaches are to be preferred. The linear function (linear regression model) is defined as: y = w_0x_0 + w_1x_1 + ... + w_mx_m = \\sum_{j=0}^{m} w_jx_j = \\mathbf{w}^T\\mathbf{x} where y is the response variable, \\mathbf{x} is an m -dimensional sample vector, and \\mathbf{w} is the weight vector (vector of coefficients). Note that w_0 represents the y-axis intercept of the model and therefore x_0=1 . Using the closed-form solution (normal equation), we compute the weights of the model as follows: \\mathbf{w} = (\\mathbf{X}^T\\mathbf{X})^{-1}\\mathbf{X}^Ty","title":"Normal Equations (closed-form solution)"},{"location":"user_guide/regressor/LinearRegression/#gradient-descent-gd-and-stochastic-gradient-descent-sgd","text":"See Gradient Descent and Stochastic Gradient Descent and Deriving the Gradient Descent Rule for Linear Regression and Adaline for details. Random shuffling is implemented as: for one or more epochs randomly shuffle samples in the training set for training sample i compute gradients and perform weight updates","title":"Gradient Descent (GD) and Stochastic Gradient Descent (SGD)"},
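The shuffling scheme above corresponds roughly to the following conceptual sketch (plain NumPy; the function name and details are illustrative, not mlxtend's actual implementation):

```python
import numpy as np

def sgd_linreg(X, y, eta=0.01, epochs=50, seed=0):
    rng = np.random.RandomState(seed)
    w, b = np.zeros(X.shape[1]), 0.0
    for _ in range(epochs):                 # for one or more epochs
        for i in rng.permutation(len(y)):   # randomly shuffle samples in the training set
            error = (X[i] @ w + b) - y[i]   # prediction error for training sample i
            w -= eta * error * X[i]         # gradient step for the weights
            b -= eta * error                # gradient step for the bias
    return w, b
```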
{"location":"user_guide/regressor/LinearRegression/#references","text":"F. Galton. Regression towards mediocrity in hereditary stature . Journal of the Anthropological Institute of Great Britain and Ireland, pages 246\u2013263, 1886. A. I. Khuri. Introduction to linear regression analysis , by Douglas C. Montgomery, Elizabeth A. Peck, G. Geoffrey Vining. International Statistical Review, 81(2):318\u2013319, 2013. D. S. G. Pollock. The Classical Linear Regression Model .","title":"References"},{"location":"user_guide/regressor/LinearRegression/#example-1-closed-form-solution","text":"import numpy as np import matplotlib.pyplot as plt from mlxtend.regressor import LinearRegression X = np.array([ 1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis] y = np.array([ 1.0, 2.0, 3.0, 4.0, 5.0]) ne_lr = LinearRegression(minibatches=None) ne_lr.fit(X, y) print('Intercept: %.2f' % ne_lr.b_) print('Slope: %.2f' % ne_lr.w_[0]) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') plt.plot(X, model.predict(X), color='red') return lin_regplot(X, y, ne_lr) plt.show() Intercept: 0.25 Slope: 0.81","title":"Example 1 - Closed Form Solution"},{"location":"user_guide/regressor/LinearRegression/#example-2-gradient-descent","text":"import numpy as np import matplotlib.pyplot as plt from mlxtend.regressor import LinearRegression X = np.array([ 1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis] y = np.array([ 1.0, 2.0, 3.0, 4.0, 5.0]) gd_lr = LinearRegression(eta=0.005, epochs=100, minibatches=1, random_seed=123, print_progress=3) gd_lr.fit(X, y) print('Intercept: %.2f' % gd_lr.b_) print('Slope: %.2f' % gd_lr.w_) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') plt.plot(X, model.predict(X), color='red') return lin_regplot(X, y, gd_lr) plt.show() Iteration: 100/100 | Cost 0.08 | Elapsed: 0:00:00 | ETA: 0:00:00 Intercept: 0.22 Slope: 0.82 # Visualizing the cost to check for convergence and plotting the linear model: plt.plot(range(1, gd_lr.epochs+1), gd_lr.cost_) plt.xlabel('Epochs') plt.ylabel('Cost') plt.ylim([0, 0.2]) plt.tight_layout() plt.show()","title":"Example 2 - Gradient Descent"},{"location":"user_guide/regressor/LinearRegression/#example-3-stochastic-gradient-descent","text":"import numpy as np import matplotlib.pyplot as plt from mlxtend.regressor import LinearRegression X = np.array([ 1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis] y = np.array([ 1.0, 2.0, 3.0, 4.0, 5.0]) sgd_lr = LinearRegression(eta=0.01, epochs=100, random_seed=0, minibatches=len(y)) sgd_lr.fit(X, y) print('Intercept: %.2f' % sgd_lr.b_) print('Slope: %.2f' % sgd_lr.w_) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') plt.plot(X, model.predict(X), color='red') return lin_regplot(X, y, sgd_lr) plt.show() Intercept: 0.24 Slope: 0.82 plt.plot(range(1, sgd_lr.epochs+1), sgd_lr.cost_) plt.xlabel('Epochs') plt.ylabel('Cost') plt.ylim([0, 0.2]) plt.tight_layout() plt.show()","title":"Example 3 - Stochastic Gradient Descent"},{"location":"user_guide/regressor/LinearRegression/#example-3-stochastic-gradient-descent-with-minibatches","text":"import numpy as np import matplotlib.pyplot as plt from mlxtend.regressor import LinearRegression X = np.array([ 1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis] y = np.array([ 1.0, 2.0, 3.0, 4.0, 5.0]) sgd_lr = LinearRegression(eta=0.01, epochs=100, random_seed=0, minibatches=3) sgd_lr.fit(X, y) print('Intercept: %.2f' % sgd_lr.b_) print('Slope: %.2f' % sgd_lr.w_) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') plt.plot(X, model.predict(X), color='red') return lin_regplot(X, y, sgd_lr) plt.show() Intercept: 0.24 Slope: 0.82 plt.plot(range(1, sgd_lr.epochs+1), sgd_lr.cost_) plt.xlabel('Epochs') plt.ylabel('Cost') plt.ylim([0, 0.2]) plt.tight_layout() plt.show()","title":"Example 3 - Stochastic Gradient Descent with Minibatches"},
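Since all gradient-based variants above use a fixed learning rate eta, they tend to behave better on standardized inputs (see the standardize user guide earlier in this document); a hypothetical preprocessing step for the gradient descent example might look like:

```python
import numpy as np
from mlxtend.preprocessing import standardize
from mlxtend.regressor import LinearRegression

X = np.array([1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis]
y = np.array([1.0, 2.0, 3.0, 4.0, 5.0])

X_std = standardize(X, columns=[0])   # zero mean, unit variance input
# eta=0.05 is an illustrative value, chosen larger than in Example 2
gd_lr = LinearRegression(eta=0.05, epochs=100, minibatches=1, random_seed=123)
gd_lr.fit(X_std, y)
```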
{"location":"user_guide/regressor/LinearRegression/#api","text":"LinearRegression(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) Ordinary least squares linear regression. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent learning If 1 < minibatches < len(y): Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if the closed-form (normal equation) solver is not used 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch; ignored if the closed-form (normal equation) solver is used Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/LinearRegression/","title":"API"},{"location":"user_guide/regressor/LinearRegression/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values.","title":"Methods"},{"location":"user_guide/regressor/StackingCVRegressor/","text":"StackingCVRegressor An ensemble-learning meta-regressor for stacking regression from mlxtend.regressor import StackingCVRegressor Overview Stacking is an ensemble learning technique to combine multiple regression models via a meta-regressor. The StackingCVRegressor extends the standard stacking algorithm (implemented as StackingRegressor ) using out-of-fold predictions to prepare the input data for the level-2 regressor. In the standard stacking procedure, the first-level regressors are fit to the same training set that is used to prepare the inputs for the second-level regressor, which may lead to overfitting. The StackingCVRegressor , however, uses the concept of out-of-fold predictions: the dataset is split into k folds, and in k successive rounds, k-1 folds are used to fit the first-level regressors. In each round, the first-level regressors are then applied to the remaining subset that was not used for model fitting in that iteration. The resulting predictions are then stacked and provided -- as input data -- to the second-level regressor. After the training of the StackingCVRegressor , the first-level regressors are fit to the entire dataset for optimal predictions. References Breiman, Leo. \" Stacked regressions. \" Machine learning 24.1 (1996): 49-64.
Analogous implementation: StackingCVClassifier Example 1: Boston Housing Data Predictions In this example we evaluate some basic prediction models on the boston housing dataset and see how the R^2 and MSE scores are affected by combining the models with StackingCVRegressor . The code output below demonstrates that the stacked model performs the best on this dataset -- slightly better than the best single regression model. from mlxtend.regressor import StackingCVRegressor from sklearn.datasets import load_boston from sklearn.svm import SVR from sklearn.linear_model import Lasso from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import cross_val_score import numpy as np RANDOM_SEED = 42 X, y = load_boston(return_X_y=True) svr = SVR(kernel='linear') lasso = Lasso() rf = RandomForestRegressor(n_estimators=5, random_state=RANDOM_SEED) # The StackingCVRegressor uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed need to be specified explicitely for # deterministic behavior np.random.seed(RANDOM_SEED) stack = StackingCVRegressor(regressors=(svr, lasso, rf), meta_regressor=lasso) print('5-fold cross validation scores:\\n') for clf, label in zip([svr, lasso, rf, stack], ['SVM', 'Lasso', 'Random Forest', 'StackingCVRegressor']): scores = cross_val_score(clf, X, y, cv=5) print(\"R^2 Score: %0.2f (+/- %0.2f) [%s]\" % ( scores.mean(), scores.std(), label)) 5-fold cross validation scores: R^2 Score: 0.45 (+/- 0.29) [SVM] R^2 Score: 0.43 (+/- 0.14) [Lasso] R^2 Score: 0.52 (+/- 0.28) [Random Forest] R^2 Score: 0.58 (+/- 0.24) [StackingCVRegressor] # The StackingCVRegressor uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed need to be specified explicitely for # deterministic behavior np.random.seed(RANDOM_SEED) stack = StackingCVRegressor(regressors=(svr, lasso, rf), meta_regressor=lasso) print('5-fold cross validation scores:\\n') for clf, label in zip([svr, lasso, rf, stack], ['SVM', 'Lasso', 'Random Forest', 'StackingCVRegressor']): scores = cross_val_score(clf, X, y, cv=5, scoring='neg_mean_squared_error') print(\"Neg. MSE Score: %0.2f (+/- %0.2f) [%s]\" % ( scores.mean(), scores.std(), label)) 5-fold cross validation scores: Neg. MSE Score: -33.69 (+/- 22.36) [SVM] Neg. MSE Score: -35.53 (+/- 16.99) [Lasso] Neg. MSE Score: -27.32 (+/- 16.62) [Random Forest] Neg. MSE Score: -25.64 (+/- 18.11) [StackingCVRegressor] Example 2: GridSearchCV with Stacking In this second example we demonstrate how StackingCVRegressor works in combination with GridSearchCV . The stack still allows tuning hyper parameters of the base and meta models! To set up a parameter grid for scikit-learn's GridSearch , we simply provide the estimator's names in the parameter grid -- in the special case of the meta-regressor, we append the 'meta-' prefix. from mlxtend.regressor import StackingCVRegressor from sklearn.datasets import load_boston from sklearn.linear_model import Lasso from sklearn.linear_model import Ridge from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import GridSearchCV X, y = load_boston(return_X_y=True) ridge = Ridge() lasso = Lasso() rf = RandomForestRegressor(random_state=RANDOM_SEED) # The StackingCVRegressor uses scikit-learn's check_cv # internally, which doesn't support a random seed. 
Thus # NumPy's random seed need to be specified explicitely for # deterministic behavior np.random.seed(RANDOM_SEED) stack = StackingCVRegressor(regressors=(lasso, ridge), meta_regressor=rf, use_features_in_secondary=True) params = {'lasso__alpha': [0.1, 1.0, 10.0], 'ridge__alpha': [0.1, 1.0, 10.0]} grid = GridSearchCV( estimator=stack, param_grid={ 'lasso__alpha': [x/5.0 for x in range(1, 10)], 'ridge__alpha': [x/20.0 for x in range(1, 10)], 'meta-randomforestregressor__n_estimators': [10, 100] }, cv=5, refit=True ) grid.fit(X, y) print(\"Best: %f using %s\" % (grid.best_score_, grid.best_params_)) Best: 0.673590 using {'lasso__alpha': 0.4, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.3} cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) if r > 10: break print('...') print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.622 +/- 0.10 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.05} 0.649 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.1} 0.650 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.15} 0.667 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.2} 0.629 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.25} 0.663 +/- 0.08 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.3} 0.633 +/- 0.08 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.35} 0.637 +/- 0.08 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.4} 0.649 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.45} 0.653 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 100, 'ridge__alpha': 0.05} 0.648 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 100, 'ridge__alpha': 0.1} 0.645 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 100, 'ridge__alpha': 0.15} ... Best parameters: {'lasso__alpha': 0.4, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.3} Accuracy: 0.67 Note The StackingCVRegressor also enables grid search over the regressors argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both, different regressors and regressor parameters at the same time. For instance, while the following parameter dictionary works params = {'randomforestregressor__n_estimators': [1, 100], 'regressors': [(regr1, regr1, regr1), (regr2, regr3)]} it will use the instance settings of regr1 , regr2 , and regr3 and not overwrite it with the 'n_estimators' settings from 'randomforestregressor__n_estimators': [1, 100] . API StackingCVRegressor(regressors, meta_regressor, cv=5, shuffle=True, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A 'Stacking Cross-Validation' regressor for scikit-learn estimators. New in mlxtend v0.7.0 Notes The StackingCVRegressor uses scikit-learn's check_cv internally, which doesn't support a random seed. 
Thus NumPy's random seed need to be specified explicitely for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVRegressor Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingCVRegressor will fit clones of these original regressors that will be stored in the class attribute self.regr_ . meta_regressor : object The meta-regressor to be fitted on the ensemble of regressor cv : int, cross-validation generator or iterable, optional (default: 5) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - integer, to specify the number of folds in a KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use KFold cross-validation use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. shuffle : bool (default: True) If True, and the cv argument is integer, the training data will be shuffled at fitting stage prior to cross-validation. If the cv argument is a specific cross validation technique, this argument is omitted. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor stored in the self.train_meta_features_ array, which can be accessed after calling fit . refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that are supporting the scikit-learn fit/predict API interface but are not compatible to scikit-learn's clone function. Attributes train_meta_features : numpy array, shape = [n_samples, n_regressors] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingCVRegressor/ Methods fit(X, y, groups=None, sample_weight=None) Fit ensemble regressors and the meta-regressor. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Get parameters for this estimator. 
Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) wrt. y. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self","title":"StackingCVRegressor"},{"location":"user_guide/regressor/StackingCVRegressor/#stackingcvregressor","text":"An ensemble-learning meta-regressor for stacking regression from mlxtend.regressor import StackingCVRegressor","title":"StackingCVRegressor"},{"location":"user_guide/regressor/StackingCVRegressor/#overview","text":"Stacking is an ensemble learning technique to combine multiple regression models via a meta-regressor. The StackingCVRegressor extends the standard stacking algorithm (implemented as StackingRegressor ) using out-of-fold predictions to prepare the input data for the level-2 regressor. In the standard stacking procedure, the first-level regressors are fit to the same training set that is used prepare the inputs for the second-level regressor, which may lead to overfitting. The StackingCVRegressor , however, uses the concept of out-of-fold predictions: the dataset is split into k folds, and in k successive rounds, k-1 folds are used to fit the first level regressor. In each round, the first-level regressors are then applied to the remaining 1 subset that was not used for model fitting in each iteration. The resulting predictions are then stacked and provided -- as input data -- to the second-level regressor. After the training of the StackingCVRegressor , the first-level regressors are fit to the entire dataset for optimal predicitons.","title":"Overview"},{"location":"user_guide/regressor/StackingCVRegressor/#references","text":"Breiman, Leo. \" Stacked regressions. 
\" Machine learning 24.1 (1996): 49-64. Analogous implementation: StackingCVClassifier","title":"References"},{"location":"user_guide/regressor/StackingCVRegressor/#example-1-boston-housing-data-predictions","text":"In this example we evaluate some basic prediction models on the boston housing dataset and see how the R^2 and MSE scores are affected by combining the models with StackingCVRegressor . The code output below demonstrates that the stacked model performs the best on this dataset -- slightly better than the best single regression model. from mlxtend.regressor import StackingCVRegressor from sklearn.datasets import load_boston from sklearn.svm import SVR from sklearn.linear_model import Lasso from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import cross_val_score import numpy as np RANDOM_SEED = 42 X, y = load_boston(return_X_y=True) svr = SVR(kernel='linear') lasso = Lasso() rf = RandomForestRegressor(n_estimators=5, random_state=RANDOM_SEED) # The StackingCVRegressor uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed need to be specified explicitely for # deterministic behavior np.random.seed(RANDOM_SEED) stack = StackingCVRegressor(regressors=(svr, lasso, rf), meta_regressor=lasso) print('5-fold cross validation scores:\\n') for clf, label in zip([svr, lasso, rf, stack], ['SVM', 'Lasso', 'Random Forest', 'StackingCVRegressor']): scores = cross_val_score(clf, X, y, cv=5) print(\"R^2 Score: %0.2f (+/- %0.2f) [%s]\" % ( scores.mean(), scores.std(), label)) 5-fold cross validation scores: R^2 Score: 0.45 (+/- 0.29) [SVM] R^2 Score: 0.43 (+/- 0.14) [Lasso] R^2 Score: 0.52 (+/- 0.28) [Random Forest] R^2 Score: 0.58 (+/- 0.24) [StackingCVRegressor] # The StackingCVRegressor uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed need to be specified explicitely for # deterministic behavior np.random.seed(RANDOM_SEED) stack = StackingCVRegressor(regressors=(svr, lasso, rf), meta_regressor=lasso) print('5-fold cross validation scores:\\n') for clf, label in zip([svr, lasso, rf, stack], ['SVM', 'Lasso', 'Random Forest', 'StackingCVRegressor']): scores = cross_val_score(clf, X, y, cv=5, scoring='neg_mean_squared_error') print(\"Neg. MSE Score: %0.2f (+/- %0.2f) [%s]\" % ( scores.mean(), scores.std(), label)) 5-fold cross validation scores: Neg. MSE Score: -33.69 (+/- 22.36) [SVM] Neg. MSE Score: -35.53 (+/- 16.99) [Lasso] Neg. MSE Score: -27.32 (+/- 16.62) [Random Forest] Neg. MSE Score: -25.64 (+/- 18.11) [StackingCVRegressor]","title":"Example 1: Boston Housing Data Predictions"},{"location":"user_guide/regressor/StackingCVRegressor/#example-2-gridsearchcv-with-stacking","text":"In this second example we demonstrate how StackingCVRegressor works in combination with GridSearchCV . The stack still allows tuning hyper parameters of the base and meta models! To set up a parameter grid for scikit-learn's GridSearch , we simply provide the estimator's names in the parameter grid -- in the special case of the meta-regressor, we append the 'meta-' prefix. 
from mlxtend.regressor import StackingCVRegressor from sklearn.datasets import load_boston from sklearn.linear_model import Lasso from sklearn.linear_model import Ridge from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import GridSearchCV X, y = load_boston(return_X_y=True) ridge = Ridge() lasso = Lasso() rf = RandomForestRegressor(random_state=RANDOM_SEED) # The StackingCVRegressor uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed need to be specified explicitely for # deterministic behavior np.random.seed(RANDOM_SEED) stack = StackingCVRegressor(regressors=(lasso, ridge), meta_regressor=rf, use_features_in_secondary=True) params = {'lasso__alpha': [0.1, 1.0, 10.0], 'ridge__alpha': [0.1, 1.0, 10.0]} grid = GridSearchCV( estimator=stack, param_grid={ 'lasso__alpha': [x/5.0 for x in range(1, 10)], 'ridge__alpha': [x/20.0 for x in range(1, 10)], 'meta-randomforestregressor__n_estimators': [10, 100] }, cv=5, refit=True ) grid.fit(X, y) print(\"Best: %f using %s\" % (grid.best_score_, grid.best_params_)) Best: 0.673590 using {'lasso__alpha': 0.4, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.3} cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) if r > 10: break print('...') print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.622 +/- 0.10 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.05} 0.649 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.1} 0.650 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.15} 0.667 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.2} 0.629 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.25} 0.663 +/- 0.08 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.3} 0.633 +/- 0.08 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.35} 0.637 +/- 0.08 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.4} 0.649 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.45} 0.653 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 100, 'ridge__alpha': 0.05} 0.648 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 100, 'ridge__alpha': 0.1} 0.645 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 100, 'ridge__alpha': 0.15} ... Best parameters: {'lasso__alpha': 0.4, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.3} Accuracy: 0.67 Note The StackingCVRegressor also enables grid search over the regressors argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both, different regressors and regressor parameters at the same time. 
For instance, while the following parameter dictionary works params = {'randomforestregressor__n_estimators': [1, 100], 'regressors': [(regr1, regr1, regr1), (regr2, regr3)]} it will use the instance settings of regr1 , regr2 , and regr3 and not overwrite it with the 'n_estimators' settings from 'randomforestregressor__n_estimators': [1, 100] .","title":"Example 2: GridSearchCV with Stacking"},{"location":"user_guide/regressor/StackingCVRegressor/#api","text":"StackingCVRegressor(regressors, meta_regressor, cv=5, shuffle=True, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A 'Stacking Cross-Validation' regressor for scikit-learn estimators. New in mlxtend v0.7.0 Notes The StackingCVRegressor uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus NumPy's random seed need to be specified explicitely for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVRegressor Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingCVRegressor will fit clones of these original regressors that will be stored in the class attribute self.regr_ . meta_regressor : object The meta-regressor to be fitted on the ensemble of regressor cv : int, cross-validation generator or iterable, optional (default: 5) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - integer, to specify the number of folds in a KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use KFold cross-validation use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. shuffle : bool (default: True) If True, and the cv argument is integer, the training data will be shuffled at fitting stage prior to cross-validation. If the cv argument is a specific cross validation technique, this argument is omitted. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor stored in the self.train_meta_features_ array, which can be accessed after calling fit . refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that are supporting the scikit-learn fit/predict API interface but are not compatible to scikit-learn's clone function. Attributes train_meta_features : numpy array, shape = [n_samples, n_regressors] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingCVRegressor/","title":"API"},{"location":"user_guide/regressor/StackingCVRegressor/#methods","text":"fit(X, y, groups=None, sample_weight=None) Fit ensemble regressors and the meta-regressor. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. 
y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) wrt. y. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self","title":"Methods"},{"location":"user_guide/regressor/StackingRegressor/","text":"StackingRegressor An ensemble-learning meta-regressor for stacking regression from mlxtend.regressor import StackingRegressor Overview Stacking regression is an ensemble learning technique to combine multiple regression models via a meta-regressor. The individual regression models are trained based on the complete training set; then, the meta-regressor is fitted based on the outputs -- meta-features -- of the individual regression models in the ensemble. References Breiman, Leo. \" Stacked regressions. 
\" Machine learning 24.1 (1996): 49-64. Example 1 - Simple Stacked Regression from mlxtend.regressor import StackingRegressor from mlxtend.data import boston_housing_data from sklearn.linear_model import LinearRegression from sklearn.linear_model import Ridge from sklearn.svm import SVR import matplotlib.pyplot as plt import numpy as np # Generating a sample dataset np.random.seed(1) X = np.sort(5 * np.random.rand(40, 1), axis=0) y = np.sin(X).ravel() y[::5] += 3 * (0.5 - np.random.rand(8)) # Initializing models lr = LinearRegression() svr_lin = SVR(kernel='linear') ridge = Ridge(random_state=1) svr_rbf = SVR(kernel='rbf') stregr = StackingRegressor(regressors=[svr_lin, lr, ridge], meta_regressor=svr_rbf) # Training the stacking classifier stregr.fit(X, y) stregr.predict(X) # Evaluate and visualize the fit print(\"Mean Squared Error: %.4f\" % np.mean((stregr.predict(X) - y) ** 2)) print('Variance Score: %.4f' % stregr.score(X, y)) with plt.style.context(('seaborn-whitegrid')): plt.scatter(X, y, c='lightgray') plt.plot(X, stregr.predict(X), c='darkgreen', lw=2) plt.show() Mean Squared Error: 0.2039 Variance Score: 0.7049 stregr StackingRegressor(meta_regressor=SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.1, gamma='auto', kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False), regressors=[SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.1, gamma='auto', kernel='linear', max_iter=-1, shrinking=True, tol=0.001, verbose=False), LinearRegression(copy_X=True, fit_intercept=True, n_jobs=1, normalize=False), Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None, normalize=False, random_state=1, solver='auto', tol=0.001)], verbose=0) Example 2 - Stacked Regression and GridSearch To set up a parameter grid for scikit-learn's GridSearch , we simply provide the estimator's names in the parameter grid -- in the special case of the meta-regressor, we append the 'meta-' prefix. 
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso

# Initializing models

lr = LinearRegression()
svr_lin = SVR(kernel='linear')
ridge = Ridge(random_state=1)
lasso = Lasso(random_state=1)
svr_rbf = SVR(kernel='rbf')
regressors = [svr_lin, lr, ridge, lasso]
stregr = StackingRegressor(regressors=regressors,
                           meta_regressor=svr_rbf)

params = {'lasso__alpha': [0.1, 1.0, 10.0],
          'ridge__alpha': [0.1, 1.0, 10.0],
          'svr__C': [0.1, 1.0, 10.0],
          'meta-svr__C': [0.1, 1.0, 10.0, 100.0],
          'meta-svr__gamma': [0.1, 1.0, 10.0]}

grid = GridSearchCV(estimator=stregr,
                    param_grid=params,
                    cv=5,
                    refit=True)
grid.fit(X, y)

for params, mean_score, scores in grid.grid_scores_:
    print("%0.3f +/- %0.2f %r" % (mean_score, scores.std() / 2.0, params))

-9.810 +/- 6.86 {'lasso__alpha': 0.1, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1}
-9.591 +/- 6.67 {'lasso__alpha': 0.1, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1}
-9.591 +/- 6.67 {'lasso__alpha': 0.1, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1}
-9.819 +/- 6.87 {'lasso__alpha': 0.1, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1}
-9.600 +/- 6.68 {'lasso__alpha': 0.1, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1}
-9.600 +/- 6.68 {'lasso__alpha': 0.1, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1}
[... output truncated: one line per each of the 324 parameter combinations ...]
-2.615 +/- 1.28 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0}
-5.429 +/- 3.35 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0}
-5.418 +/- 3.35 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0}

/Users/Sebastian/miniconda3/lib/python3.5/site-packages/sklearn/model_selection/_search.py:662: DeprecationWarning: The grid_scores_ attribute was deprecated in version 0.18 in favor of the more elaborate cv_results_ attribute. The grid_scores_ attribute will not be available from 0.20
  DeprecationWarning)

# Evaluate and visualize the fit

print("Mean Squared Error: %.4f" % np.mean((grid.predict(X) - y) ** 2))
print('Variance Score: %.4f' % grid.score(X, y))

with plt.style.context(('seaborn-whitegrid')):
    plt.scatter(X, y, c='lightgray')
    plt.plot(X, grid.predict(X), c='darkgreen', lw=2)

plt.show()

Mean Squared Error: 0.1844
Variance Score: 0.7331

Note

The StackingRegressor also enables grid search over the regressors argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both different regressors and regressor parameters at the same time. For instance, while the following parameter dictionary works

params = {'randomforestregressor__n_estimators': [1, 100],
          'regressors': [(regr1, regr1, regr1), (regr2, regr3)]}

it will use the instance settings of regr1, regr2, and regr3, and not overwrite them with the 'n_estimators' settings from 'randomforestregressor__n_estimators': [1, 100].
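As the deprecation warning above indicates, the grid_scores_ attribute was removed in scikit-learn 0.20. A minimal sketch of an equivalent loop based on the cv_results_ dictionary (using the same grid object as above; mean_test_score, std_test_score, and params are standard cv_results_ keys):

# cv_results_ stores the grid-search results as parallel arrays keyed by name;
# this should reproduce essentially the same listing as the grid_scores_ loop.
cv_keys = ('mean_test_score', 'std_test_score', 'params')
for mean, std, params in zip(*(grid.cv_results_[k] for k in cv_keys)):
    print("%0.3f +/- %0.2f %r" % (mean, std / 2.0, params))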
API

StackingRegressor(regressors, meta_regressor, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, refit=True)

A stacking regressor for scikit-learn estimators for regression.

Parameters

regressors : array-like, shape = [n_regressors]
    A list of regressors. Invoking the fit method on the StackingRegressor will fit clones of those original regressors, which will be stored in the class attribute self.regr_.

meta_regressor : object
    The meta-regressor to be fitted on the ensemble of regressors.

verbose : int, optional (default=0)
    Controls the verbosity of the building process.
    - verbose=0 (default): Prints nothing
    - verbose=1: Prints the number & name of the regressor being fitted
    - verbose=2: Prints info about the parameters of the regressor being fitted
    - verbose>2: Changes the verbose param of the underlying regressor to self.verbose - 2

use_features_in_secondary : bool (default: False)
    If True, the meta-regressor will be trained both on the predictions of the original regressors and on the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors.

store_train_meta_features : bool (default: False)
    If True, the meta-features computed from the training data used for fitting the meta-regressor are stored in the self.train_meta_features_ array, which can be accessed after calling fit.

refit : bool (default: True)
    Clones the regressors for stacking regression if True (default), or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function.

Attributes

regr_ : list, shape=[n_regressors]
    Fitted regressors (clones of the original regressors)

meta_regr_ : estimator
    Fitted meta-regressor (clone of the original meta-estimator)

coef_ : array-like, shape = [n_features]
    Model coefficients of the fitted meta-estimator

intercept_ : float
    Intercept of the fitted meta-estimator

train_meta_features_ : numpy array, shape = [n_samples, len(self.regressors)]
    Meta-features for the training data, where n_samples is the number of samples in the training data and len(self.regressors) is the number of regressors.

Examples

For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingRegressor/

Methods

fit(X, y, sample_weight=None)

Learn weight coefficients from training data for each regressor.

Parameters

X : {array-like, sparse matrix}, shape = [n_samples, n_features]
    Training vectors, where n_samples is the number of samples and n_features is the number of features.

y : array-like, shape = [n_samples] or [n_samples, n_targets]
    Target values.

sample_weight : array-like, shape = [n_samples], optional
    Sample weights passed as sample_weight to each regressor in the regressors list as well as to the meta_regressor. Raises an error if some regressor does not support sample_weight in its fit() method.

Returns

self : object

fit_transform(X, y=None, **fit_params)

Fit to data, then transform it. Fits the transformer to X and y with optional parameters fit_params and returns a transformed version of X.

Parameters

X : numpy array of shape [n_samples, n_features]
    Training set.

y : numpy array of shape [n_samples]
    Target values.

Returns

X_new : numpy array of shape [n_samples, n_features_new]
    Transformed array.

get_params(deep=True)

Return estimator parameter names for GridSearch support.

predict(X)

Predict target values for X.

Parameters

X : {array-like, sparse matrix}, shape = [n_samples, n_features]
    Training vectors, where n_samples is the number of samples and n_features is the number of features.

Returns

y_target : array-like, shape = [n_samples] or [n_samples, n_targets]
    Predicted target values.

predict_meta_features(X)

Get meta-features of test data.

Parameters

X : numpy array, shape = [n_samples, n_features]
    Test vectors, where n_samples is the number of samples and n_features is the number of features.

Returns

meta-features : numpy array, shape = [n_samples, len(self.regressors)]
    Meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors.
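For illustration, a short sketch of predict_meta_features in action, assuming the grid object fitted in the example above (since refit=True, the best stack is available as grid.best_estimator_):

# The meta-features are the base regressors' predictions, stacked column-wise:
# one column per regressor in the regressors list (4 in the grid-search example).
best_stack = grid.best_estimator_
meta_feats = best_stack.predict_meta_features(X)
print(meta_feats.shape)  # (n_samples, n_regressors), e.g. (40, 4) here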
score(X, y, sample_weight=None)

Returns the coefficient of determination R^2 of the prediction.

The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) ** 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) ** 2).sum(). The best possible score is 1.0, and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get an R^2 score of 0.0.

Parameters

X : array-like, shape = (n_samples, n_features)
    Test samples.

y : array-like, shape = (n_samples) or (n_samples, n_outputs)
    True values for X.

sample_weight : array-like, shape = [n_samples], optional
    Sample weights.

Returns

score : float
    R^2 of self.predict(X) with respect to y.

set_params(**params)

Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.

Returns

self

Properties

coef_

intercept_
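To make the use_features_in_secondary option described in the Parameters section concrete, here is a minimal sketch (stregr2 is just an illustrative name; the base models are the ones initialized in the examples above):

# With use_features_in_secondary=True, the meta-regressor is fitted on the
# base regressors' predictions concatenated with the original input features,
# instead of on the predictions alone.
stregr2 = StackingRegressor(regressors=[svr_lin, lr, ridge],
                            meta_regressor=svr_rbf,
                            use_features_in_secondary=True)
stregr2.fit(X, y)
print("Variance Score: %.4f" % stregr2.score(X, y))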
1.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.388 +/- 1.42 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.387 +/- 1.42 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -2.159 +/- 2.17 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.421 +/- 1.45 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.421 +/- 1.45 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -2.620 +/- 1.60 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -8.549 +/- 5.97 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -8.543 +/- 5.97 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -2.607 +/- 1.54 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -7.940 +/- 5.42 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -7.962 +/- 5.45 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -2.615 +/- 1.28 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -5.429 +/- 3.35 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -5.418 +/- 3.35 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -9.941 +/- 6.89 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -9.716 +/- 6.70 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -9.716 +/- 6.70 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -9.953 +/- 6.90 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -9.725 +/- 6.71 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -9.725 +/- 6.71 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -10.035 +/- 6.93 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -9.793 +/- 6.74 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -9.793 +/- 6.74 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -5.238 +/- 4.24 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 1.0} -4.240 +/- 3.29 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 1.0} -4.240 +/- 3.29 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 1.0} -5.277 +/- 4.28 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 1.0} -4.267 +/- 3.31 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 1.0} -4.267 +/- 3.31 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 
0.1, 'meta-svr__gamma': 1.0} -5.584 +/- 4.56 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 1.0} -4.480 +/- 3.48 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 1.0} -4.480 +/- 3.48 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 1.0} -4.649 +/- 2.88 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -5.364 +/- 3.56 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -5.364 +/- 3.56 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -4.625 +/- 2.86 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -5.343 +/- 3.55 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -5.343 +/- 3.55 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -4.430 +/- 2.69 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -5.172 +/- 3.39 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -5.172 +/- 3.39 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -6.131 +/- 4.33 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -5.607 +/- 3.90 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -5.607 +/- 3.90 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -6.150 +/- 4.34 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -5.653 +/- 3.94 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -5.653 +/- 3.94 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -6.300 +/- 4.44 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -5.957 +/- 4.14 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -5.957 +/- 4.14 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -0.286 +/- 0.21 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.118 +/- 0.13 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.118 +/- 0.13 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.290 +/- 0.21 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.122 +/- 0.13 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.122 +/- 0.13 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.263 +/- 0.19 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.162 +/- 0.14 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.161 +/- 0.14 {'lasso__alpha': 10.0, 
'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -1.386 +/- 0.96 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -2.040 +/- 1.58 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -2.040 +/- 1.58 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -1.361 +/- 0.94 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -2.029 +/- 1.57 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -2.029 +/- 1.57 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -1.182 +/- 0.79 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -1.873 +/- 1.43 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -1.874 +/- 1.44 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -1.775 +/- 1.14 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -0.902 +/- 0.32 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -0.903 +/- 0.32 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -1.812 +/- 1.17 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -0.923 +/- 0.33 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -0.922 +/- 0.33 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -2.085 +/- 1.44 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -1.080 +/- 0.47 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -1.079 +/- 0.47 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -1.208 +/- 1.22 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -0.865 +/- 0.87 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -0.864 +/- 0.87 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -1.218 +/- 1.23 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -0.881 +/- 0.89 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -0.877 +/- 0.89 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -1.369 +/- 1.39 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -1.031 +/- 1.05 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -1.034 +/- 1.05 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -0.532 +/- 0.38 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.878 +/- 0.57 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 
10.0, 'meta-svr__gamma': 10.0} -0.878 +/- 0.57 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.524 +/- 0.37 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.847 +/- 0.55 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.848 +/- 0.55 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.445 +/- 0.33 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.669 +/- 0.43 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.670 +/- 0.43 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -2.682 +/- 2.59 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -3.012 +/- 2.92 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -3.012 +/- 2.92 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -2.688 +/- 2.59 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -3.022 +/- 2.93 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -3.019 +/- 2.92 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -2.586 +/- 2.48 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -2.771 +/- 2.68 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -2.772 +/- 2.68 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -1.901 +/- 1.93 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.385 +/- 1.42 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.385 +/- 1.42 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.933 +/- 1.96 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.388 +/- 1.42 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.387 +/- 1.42 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -2.159 +/- 2.17 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.421 +/- 1.45 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.421 +/- 1.45 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -2.620 +/- 1.60 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -8.549 +/- 5.97 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -8.543 +/- 5.97 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -2.607 +/- 1.54 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 
/Users/Sebastian/miniconda3/lib/python3.5/site-packages/sklearn/model_selection/_search.py:662: DeprecationWarning: The grid_scores_ attribute was deprecated in version 0.18 in favor of the more elaborate cv_results_ attribute. The grid_scores_ attribute will not be available from 0.20 DeprecationWarning) # Evaluate and visualize the fit print(\"Mean Squared Error: %.4f\" % np.mean((grid.predict(X) - y) ** 2)) print('Variance Score: %.4f' % grid.score(X, y)) with plt.style.context(('seaborn-whitegrid')): plt.scatter(X, y, c='lightgray') plt.plot(X, grid.predict(X), c='darkgreen', lw=2) plt.show() Mean Squared Error: 0.1844 Variance Score: 0.7331 Note The StackingRegressor also enables grid search over the regressors argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both different regressors and regressor parameters at the same time. For instance, while the following parameter dictionary works params = {'randomforestregressor__n_estimators': [1, 100], 'regressors': [(regr1, regr1, regr1), (regr2, regr3)]} it will use the instance settings of regr1 , regr2 , and regr3 and not overwrite them with the 'n_estimators' settings from 'randomforestregressor__n_estimators': [1, 100] .","title":"Example 2 - Stacked Regression and GridSearch"},{"location":"user_guide/regressor/StackingRegressor/#api","text":"StackingRegressor(regressors, meta_regressor, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A Stacking regressor for scikit-learn estimators for regression. Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingRegressor will fit clones of those original regressors that will be stored in the class attribute self.regr_ . meta_regressor : object The meta-regressor to be fitted on the ensemble of regressors. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the regressor being fitted - verbose=2 : Prints info about the parameters of the regressor being fitted - verbose>2 : Changes the verbose param of the underlying regressor to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor are stored in the self.train_meta_features_ array, which can be accessed after calling fit . refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function.
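For orientation, a minimal usage sketch (added here; it is not part of the original API reference and assumes NumPy and scikit-learn are installed):

import numpy as np
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.svm import SVR
from mlxtend.regressor import StackingRegressor

# toy 1D regression data
np.random.seed(1)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()

# two base regressors, one meta-regressor
stregr = StackingRegressor(regressors=[SVR(kernel='rbf'), Ridge(alpha=1.0)],
                           meta_regressor=LinearRegression(),
                           store_train_meta_features=True)
stregr.fit(X, y)
y_pred = stregr.predict(X)
# meta-features of the training data: one column per base regressor
print(stregr.train_meta_features_.shape)  # (40, 2)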
Attributes regr_ : list, shape=[n_regressors] Fitted regressors (clones of the original regressors) meta_regr_ : estimator Fitted meta-regressor (clone of the original meta-estimator) coef_ : array-like, shape = [n_features] Model coefficients of the fitted meta-estimator intercept_ : float Intercept of the fitted meta-estimator train_meta_features_ : numpy array, shape = [n_samples, len(self.regressors)] Meta-features for the training data, where n_samples is the number of samples in the training data and len(self.regressors) is the number of regressors. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingRegressor/","title":"API"},{"location":"user_guide/regressor/StackingRegressor/#methods","text":"fit(X, y, sample_weight=None) Learn weight coefficients from training data for each regressor. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises an error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits the transformer to X and y with optional parameters **fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Sample vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] Meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) ** 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) ** 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get an R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"Methods"},{"location":"user_guide/regressor/StackingRegressor/#properties","text":"coef_ None intercept_ None","title":"Properties"},{"location":"user_guide/text/generalize_names/","text":"Generalize Names A function that converts a name into a general format (all lowercase). from mlxtend.text import generalize_names Overview A function that converts a name into a general format (all lowercase), which is useful if data is collected from different sources and is supposed to be compared or merged based on name identifiers.
E.g., if names are stored in a pandas DataFrame column, the apply function can be used to generalize names: df['name'] = df['name'].apply(generalize_names) References - Example 1 - Defaults from mlxtend.text import generalize_names generalize_names('Pozo, Jos\u00e9 \u00c1ngel') 'pozo j' generalize_names('Jos\u00e9 Pozo') 'pozo j' generalize_names('Jos\u00e9 \u00c1ngel Pozo') 'pozo j' Example 2 - Optional Parameters from mlxtend.text import generalize_names generalize_names(\"Eto'o, Samuel\", firstname_output_letters=2) 'etoo sa' generalize_names(\"Eto'o, Samuel\", firstname_output_letters=0) 'etoo' generalize_names(\"Eto'o, Samuel\", output_sep=', ') 'etoo, s' API generalize_names(name, output_sep=' ', firstname_output_letters=1) Generalize a person's first and last name. Returns a person's name in the format <last_name><separator><first name letter(s)> (all lowercase). Parameters name : str Name of the player output_sep : str (default: ' ') String for separating last name and first name in the output. firstname_output_letters : int Number of letters in the abbreviated first name. Returns gen_name : str The generalized name. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names/","title":"Generalize Names"},{"location":"user_guide/text/generalize_names/#generalize-names","text":"A function that converts a name into a general format (all lowercase). from mlxtend.text import generalize_names","title":"Generalize Names"},{"location":"user_guide/text/generalize_names/#overview","text":"A function that converts a name into a general format (all lowercase), which is useful if data is collected from different sources and is supposed to be compared or merged based on name identifiers.
Applies mlxtend.text.generalize_names to a DataFrame with 1 first name letter by default and uses more first name letters if duplicates are detected. Parameters df : pandas.DataFrame DataFrame that contains a column where generalize_names should be applied. col_name : str Name of the DataFrame column to which the generalize_names function should be applied. Returns df_new : pandas.DataFrame New DataFrame object where the generalize_names function has been applied without duplicates. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names_duplcheck/","title":"Generalize Names & Duplicate Checking"},{"location":"user_guide/text/generalize_names_duplcheck/#generalize-names-duplicate-checking","text":"A function that converts a name into a general format (all lowercase) in a pandas DataFrame while avoiding duplicate entries. from mlxtend.text import generalize_names_duplcheck","title":"Generalize Names & Duplicate Checking"},{"location":"user_guide/text/generalize_names_duplcheck/#overview","text":"Note that using mlxtend.text.generalize_names with few firstname_output_letters can result in duplicate entries. E.g., if your dataset contains the names \"Adam Johnson\" and \"Andrew Johnson\", the default setting (i.e., 1 first name letter) will produce the generalized name \"johnson a\" in both cases. One solution is to increase the number of first name letters in the output by setting the parameter firstname_output_letters to a value larger than 1. An alternative solution is to use the generalize_names_duplcheck function if you are working with pandas DataFrames. By default, generalize_names_duplcheck will apply generalize_names to a pandas DataFrame column with the minimum number of first name letters and append as many first name letters as necessary until no duplicates are present in the given DataFrame column. An example dataset column that contains such names is shown in Example 1 below.","title":"Overview"},{"location":"user_guide/text/generalize_names_duplcheck/#references","text":"-","title":"References"},{"location":"user_guide/text/generalize_names_duplcheck/#example-1-defaults","text":"Reading in a CSV file that has a column 'name' for which we want to generalize the names: Samuel Eto'o Adam Johnson Andrew Johnson import pandas as pd from io import StringIO simulated_csv = \"name,some_value\\n\"\\ \"Samuel Eto'o,1\\n\"\\ \"Adam Johnson,1\\n\"\\ \"Andrew Johnson,1\\n\" df = pd.read_csv(StringIO(simulated_csv)) df name some_value 0 Samuel Eto'o 1 1 Adam Johnson 1 2 Andrew Johnson 1 Applying generalize_names_duplcheck to generate a new DataFrame with the generalized names without duplicates: from mlxtend.text import generalize_names_duplcheck df_new = generalize_names_duplcheck(df=df, col_name='name') df_new name some_value 0 etoo s 1 1 johnson ad 1 2 johnson an 1","title":"Example 1 - Defaults"},{"location":"user_guide/text/generalize_names_duplcheck/#api","text":"generalize_names_duplcheck(df, col_name) Generalizes names and removes duplicates. Applies mlxtend.text.generalize_names to a DataFrame with 1 first name letter by default and uses more first name letters if duplicates are detected. Parameters df : pandas.DataFrame DataFrame that contains a column where generalize_names should be applied. col_name : str Name of the DataFrame column to which the generalize_names function should be applied. Returns df_new : pandas.DataFrame New DataFrame object where the generalize_names function has been applied without duplicates.
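For comparison, a small sketch (added here; not part of the original API text) of the manual alternative that generalize_names_duplcheck automates, i.e., raising firstname_output_letters until the names no longer collide:

from mlxtend.text import generalize_names

# with the default single first-name letter, distinct names can collide:
generalize_names('Adam Johnson')    # 'johnson a'
generalize_names('Andrew Johnson')  # 'johnson a'

# two first-name letters are enough to tell them apart:
generalize_names('Adam Johnson', firstname_output_letters=2)    # 'johnson ad'
generalize_names('Andrew Johnson', firstname_output_letters=2)  # 'johnson an'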
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names_duplcheck/","title":"API"},{"location":"user_guide/text/tokenizer/","text":"Tokenizer Different functions to tokenize text. from mlxtend.text import tokenizer_[type] Overview Different functions to tokenize text for natural language processing tasks, such as building a bag-of-words model for text classification. References - Example 1 - Extract Emoticons from mlxtend.text import tokenizer_emoticons tokenizer_emoticons('This :) is :( a test :-)!') [':)', ':(', ':-)'] Example 2 - Extract Words and Emoticons from mlxtend.text import tokenizer_words_and_emoticons tokenizer_words_and_emoticons('This :) is :( a test :-)!') ['this', 'is', 'a', 'test', ':)', ':(', ':-)'] API tokenizer_emoticons(text) Return emoticons from text. Examples >>> tokenizer_emoticons('This :) is :( a test :-)!') [':)', ':(', ':-)'] For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_emoticons/ tokenizer_words_and_emoticons(text) Convert text to lowercase words and emoticons. Examples >>> tokenizer_words_and_emoticons('This :) is :( a test :-)!') ['this', 'is', 'a', 'test', ':)', ':(', ':-)'] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_words_and_emoticons/","title":"Tokenizer"},{"location":"user_guide/text/tokenizer/#tokenizer","text":"Different functions to tokenize text. from mlxtend.text import tokenizer_[type]","title":"Tokenizer"},{"location":"user_guide/text/tokenizer/#overview","text":"Different functions to tokenize text for natural language processing tasks, such as building a bag-of-words model for text classification.","title":"Overview"},{"location":"user_guide/text/tokenizer/#references","text":"-","title":"References"},{"location":"user_guide/text/tokenizer/#example-1-extract-emoticons","text":"from mlxtend.text import tokenizer_emoticons tokenizer_emoticons('This :) is :( a test :-)!') [':)', ':(', ':-)']","title":"Example 1 - Extract Emoticons"},{"location":"user_guide/text/tokenizer/#example-2-extract-words-and-emoticons","text":"from mlxtend.text import tokenizer_words_and_emoticons tokenizer_words_and_emoticons('This :) is :( a test :-)!') ['this', 'is', 'a', 'test', ':)', ':(', ':-)']","title":"Example 2 - Extract Words and Emoticons"},{"location":"user_guide/text/tokenizer/#api","text":"tokenizer_emoticons(text) Return emoticons from text. Examples >>> tokenizer_emoticons('This :) is :( a test :-)!') [':)', ':(', ':-)'] For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_emoticons/ tokenizer_words_and_emoticons(text) Convert text to lowercase words and emoticons. Examples >>> tokenizer_words_and_emoticons('This :) is :( a test :-)!') ['this', 'is', 'a', 'test', ':)', ':(', ':-)'] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_words_and_emoticons/","title":"API"},{"location":"user_guide/utils/Counter/","text":"Counter A simple progress counter to print the number of iterations and time elapsed in a for-loop execution. from mlxtend.utils import Counter Overview The Counter class implements an object for displaying the number of iterations and time elapsed in a for-loop. Please note that the Counter was implemented for efficiency; thus, the Counter offers only very basic functionality in order to avoid relatively expensive evaluations (of if-else statements).
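A brief sketch (added for illustration; the name and precision parameters are documented in the API section below, and the exact printed format is approximate):

from mlxtend.utils import Counter
import time

# a named counter that reports elapsed time with two decimal places
cnt = Counter(name='batch loop', precision=2)
for i in range(5):
    time.sleep(0.1)
    cnt.update()  # prints, e.g., 'batch loop: 5 iter | 0.50 sec'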
References - Example 1 - Counting the iterations in a for-loop from mlxtend.utils import Counter import time cnt = Counter() for i in range(20): # do some computation time.sleep(0.1) cnt.update() 20 iter | 2 sec Note that the first number displays the current iteration, and the second number shows the time elapsed after initializing the Counter . API Counter(stderr=False, start_newline=True, precision=0, name=None) Class to display the progress of for-loop iterators. Parameters stderr : bool (default: False) Prints output to sys.stderr if True; uses sys.stdout otherwise. start_newline : bool (default: True) Prepends a new line to the counter, which prevents overwriting counters if multiple counters are printed in succession. precision : int (default: 0) Sets the number of decimal places when displaying the time elapsed in seconds. name : string (default: None) Prepends the specified name before the counter to allow distinguishing between multiple counters. Attributes curr_iter : int The current iteration. start_time : float The system's time in seconds when the Counter was initialized. end_time : float The system's time in seconds when the Counter was last updated. Examples >>> cnt = Counter() >>> for i in range(20): ... # do some computation ... time.sleep(0.1) ... cnt.update() 20 iter | 2 sec >>> print('The counter was initialized' ' %d seconds ago.' % (time.time() - cnt.start_time)) The counter was initialized 2 seconds ago >>> print('The counter was last updated' ' %d seconds ago.' % (time.time() - cnt.end_time)) The counter was last updated 0 seconds ago. For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/utils/Counter/ Methods update() Print current iteration and time elapsed.","title":"Counter"},{"location":"user_guide/utils/Counter/#counter","text":"A simple progress counter to print the number of iterations and time elapsed in a for-loop execution. from mlxtend.utils import Counter","title":"Counter"},{"location":"user_guide/utils/Counter/#overview","text":"The Counter class implements an object for displaying the number of iterations and time elapsed in a for-loop. Please note that the Counter was implemented for efficiency; thus, the Counter offers only very basic functionality in order to avoid relatively expensive evaluations (of if-else statements).","title":"Overview"},{"location":"user_guide/utils/Counter/#references","text":"-","title":"References"},{"location":"user_guide/utils/Counter/#example-1-counting-the-iterations-in-a-for-loop","text":"from mlxtend.utils import Counter import time cnt = Counter() for i in range(20): # do some computation time.sleep(0.1) cnt.update() 20 iter | 2 sec Note that the first number displays the current iteration, and the second number shows the time elapsed after initializing the Counter .","title":"Example 1 - Counting the iterations in a for-loop"},{"location":"user_guide/utils/Counter/#api","text":"Counter(stderr=False, start_newline=True, precision=0, name=None) Class to display the progress of for-loop iterators. Parameters stderr : bool (default: False) Prints output to sys.stderr if True; uses sys.stdout otherwise. start_newline : bool (default: True) Prepends a new line to the counter, which prevents overwriting counters if multiple counters are printed in succession. precision : int (default: 0) Sets the number of decimal places when displaying the time elapsed in seconds. name : string (default: None) Prepends the specified name before the counter to allow distinguishing between multiple counters.
Attributes curr_iter : int The current iteration. start_time : float The system's time in seconds when the Counter was initialized. end_time : float The system's time in seconds when the Counter was last updated. Examples >>> cnt = Counter() >>> for i in range(20): ... # do some computation ... time.sleep(0.1) ... cnt.update() 20 iter | 2 sec >>> print('The counter was initialized' ' %d seconds ago.' % (time.time() - cnt.start_time)) The counter was initialized 2 seconds ago >>> print('The counter was last updated' ' %d seconds ago.' % (time.time() - cnt.end_time)) The counter was last updated 0 seconds ago. For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/utils/Counter/","title":"API"},{"location":"user_guide/utils/Counter/#methods","text":"update() Print current iteration and time elapsed.","title":"Methods"}]} \ No newline at end of file +{"config":{"lang":["en"],"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"Welcome to mlxtend's documentation! Mlxtend (machine learning extensions) is a Python library of useful tools for day-to-day data science tasks. Links Documentation: http://rasbt.github.io/mlxtend Source code repository: https://github.com/rasbt/mlxtend PyPI: https://pypi.python.org/pypi/mlxtend Questions? Check out the Google Groups mailing list Examples import numpy as np import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import itertools from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import EnsembleVoteClassifier from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions # Initializing Classifiers clf1 = LogisticRegression(random_state=0) clf2 = RandomForestClassifier(random_state=0) clf3 = SVC(random_state=0, probability=True) eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], weights=[2, 1, 1], voting='soft') # Loading some example data X, y = iris_data() X = X[:,[0, 2]] # Plotting Decision Regions gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10, 8)) labels = ['Logistic Regression', 'Random Forest', 'RBF kernel SVM', 'Ensemble'] for clf, lab, grd in zip([clf1, clf2, clf3, eclf], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show() If you use mlxtend as part of your workflow in a scientific publication, please consider citing the mlxtend repository with the following DOI: @article{raschkas_2018_mlxtend, author = {Sebastian Raschka}, title = {MLxtend: Providing machine learning and data science utilities and extensions to Python\u2019s scientific computing stack}, journal = {The Journal of Open Source Software}, volume = {3}, number = {24}, month = apr, year = 2018, publisher = {The Open Journal}, doi = {10.21105/joss.00638}, url = {http://joss.theoj.org/papers/10.21105/joss.00638} } License This project is released under a permissive new BSD open source license ( LICENSE-BSD3.txt ) and is commercially usable. There is no warranty; not even for merchantability or fitness for a particular purpose. In addition, you may use, copy, modify and redistribute all artistic creative works (figures and images) included in this distribution under the directory according to the terms and conditions of the Creative Commons Attribution 4.0 International License. See the file LICENSE-CC-BY.txt for details.
(Computer-generated graphics such as the plots produced by matplotlib fall under the BSD license mentioned above). Contact I received a lot of feedback and questions about mlxtend recently, and I thought that it would be worthwhile to set up a public communication channel. Before you write an email with a question about mlxtend, please consider posting it here since it can also be useful to others! Please join the Google Groups Mailing List ! If Google Groups is not for you, please feel free to write me an email or consider filing an issue on GitHub's issue tracker for new feature requests or bug reports. In addition, I set up a Gitter channel for live discussions.","title":"Home"},{"location":"#welcome-to-mlxtends-documentation","text":"Mlxtend (machine learning extensions) is a Python library of useful tools for day-to-day data science tasks.","title":"Welcome to mlxtend's documentation!"},{"location":"#links","text":"Documentation: http://rasbt.github.io/mlxtend Source code repository: https://github.com/rasbt/mlxtend PyPI: https://pypi.python.org/pypi/mlxtend Questions? Check out the Google Groups mailing list","title":"Links"},{"location":"#examples","text":"import numpy as np import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import itertools from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import EnsembleVoteClassifier from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions # Initializing Classifiers clf1 = LogisticRegression(random_state=0) clf2 = RandomForestClassifier(random_state=0) clf3 = SVC(random_state=0, probability=True) eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], weights=[2, 1, 1], voting='soft') # Loading some example data X, y = iris_data() X = X[:,[0, 2]] # Plotting Decision Regions gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10, 8)) labels = ['Logistic Regression', 'Random Forest', 'RBF kernel SVM', 'Ensemble'] for clf, lab, grd in zip([clf1, clf2, clf3, eclf], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show() If you use mlxtend as part of your workflow in a scientific publication, please consider citing the mlxtend repository with the following DOI: @article{raschkas_2018_mlxtend, author = {Sebastian Raschka}, title = {MLxtend: Providing machine learning and data science utilities and extensions to Python\u2019s scientific computing stack}, journal = {The Journal of Open Source Software}, volume = {3}, number = {24}, month = apr, year = 2018, publisher = {The Open Journal}, doi = {10.21105/joss.00638}, url = {http://joss.theoj.org/papers/10.21105/joss.00638} }","title":"Examples"},{"location":"#license","text":"This project is released under a permissive new BSD open source license ( LICENSE-BSD3.txt ) and is commercially usable. There is no warranty; not even for merchantability or fitness for a particular purpose. In addition, you may use, copy, modify and redistribute all artistic creative works (figures and images) included in this distribution under the directory according to the terms and conditions of the Creative Commons Attribution 4.0 International License. See the file LICENSE-CC-BY.txt for details.
(Computer-generated graphics such as the plots produced by matplotlib fall under the BSD license mentioned above).","title":"License"},{"location":"#contact","text":"I received a lot of feedback and questions about mlxtend recently, and I thought that it would be worthwhile to set up a public communication channel. Before you write an email with a question about mlxtend, please consider posting it here since it can also be useful to others! Please join the Google Groups Mailing List ! If Google Groups is not for you, please feel free to write me an email or consider filing an issue on GitHub's issue tracker for new feature requests or bug reports. In addition, I set up a Gitter channel for live discussions.","title":"Contact"},{"location":"CHANGELOG/","text":"Release Notes The CHANGELOG for the current development version is available at https://github.com/rasbt/mlxtend/blob/master/docs/sources/CHANGELOG.md . Version 0.14.0 (2018-11-09) Downloads Source code (zip) Source code (tar.gz) New Features Added a scatterplotmatrix function to the plotting module. ( #437 ) Added sample_weight option to StackingRegressor , StackingClassifier , StackingCVRegressor , StackingCVClassifier , EnsembleVoteClassifier . ( #438 ) Added a RandomHoldoutSplit class to perform a random train/valid split without rotation in SequentialFeatureSelector , scikit-learn GridSearchCV etc. ( #442 ) Added a PredefinedHoldoutSplit class to perform a train/valid split, based on user-specified indices, without rotation in SequentialFeatureSelector , scikit-learn GridSearchCV etc. ( #443 ) Created a new mlxtend.image submodule for working on image processing-related tasks. ( #457 ) Added a new convenience function extract_face_landmarks based on dlib to mlxtend.image . ( #458 ) Added a method='oob' option to the mlxtend.evaluate.bootstrap_point632_score function to compute the classic out-of-bag bootstrap estimate. ( #459 ) Added a method='.632+' option to the mlxtend.evaluate.bootstrap_point632_score function to compute the .632+ bootstrap estimate that addresses the optimism bias of the .632 bootstrap. ( #459 ) Added a new mlxtend.evaluate.ftest function to perform an F-test for comparing the accuracies of two or more classification models. ( #460 ) Added a new mlxtend.evaluate.combined_ftest_5x2cv function to perform a combined 5x2cv F-test for comparing the performance of two models. ( #461 ) Added a new mlxtend.evaluate.proportion_difference test for comparing two proportions (e.g., classifier accuracies). ( #462 ) Changes Addressed deprecation warnings in NumPy 1.15. ( #425 ) Because of complications in PR ( #459 ), Python 2.7 support was dropped; since official support for Python 2.7 by the Python Software Foundation is ending in approx. 12 months anyway, this refocusing will hopefully free up some developer time with regard to not having to worry about backward compatibility. Bug Fixes Fixed an issue with a missing import in mlxtend.plotting.plot_confusion_matrix . ( #428 ) Version 0.13.0 (2018-07-20) Downloads Source code (zip) Source code (tar.gz) New Features A meaningful error message is now raised when a cross-validation generator is used with SequentialFeatureSelector . ( #377 ) The SequentialFeatureSelector now accepts custom feature names via the fit method for more interpretable feature subset reports. ( #379 ) The SequentialFeatureSelector is now also compatible with Pandas DataFrames and uses DataFrame column-names for more interpretable feature subset reports.
( #379 ) ColumnSelector now works with Pandas DataFrame columns. ( #378 by Manuel Garrido ) The ExhaustiveFeatureSelector estimator in mlxtend.feature_selection is now safely stoppable mid-process via Control+C. ( #380 ) Two new functions, vectorspace_orthonormalization and vectorspace_dimensionality , were added to mlxtend.math to use the Gram-Schmidt process to convert a set of linearly independent vectors into a set of orthonormal basis vectors, and to compute the dimensionality of a vectorspace, respectively. ( #382 ) mlxtend.frequent_patterns.apriori now supports pandas SparseDataFrame objects to generate frequent itemsets. ( #404 via Daniel Morales ) The plot_confusion_matrix function now has the ability to show normalized confusion matrix coefficients in addition to or instead of absolute confusion matrix coefficients with or without a colorbar. The text display method has been changed so that the full range of the colormap is used. The default size is also now set based on the number of classes. Added support for merging the meta features with the original input features in StackingRegressor (via use_features_in_secondary ) like it is already supported in the other Stacking classes. ( #418 ) Added a support_only argument to the association_rules function, which allows constructing association rules (based on the support metric only) for cropped input DataFrames that don't contain a complete set of antecedent and consequent support values. ( #421 ) Changes Itemsets generated with apriori are now frozenset objects. ( #393 by William Laney and #394 ) Now raises an error if an input DataFrame passed to apriori contains values other than 0, 1, True, or False. ( #419 ) Bug Fixes Allow mlxtend estimators to be cloned via scikit-learn's clone function. ( #374 ) Fixed a bug to allow the correct use of refit=False in StackingRegressor and StackingCVRegressor ( #384 and ( #385 ) by selay01 ) Allow StackingClassifier to work with sparse matrices when use_features_in_secondary=True ( #408 by Floris Hoogenbook ) Allow StackingCVRegressor to work with sparse matrices when use_features_in_secondary=True ( #416 ) Allow StackingCVClassifier to work with sparse matrices when use_features_in_secondary=True ( #417 ) Version 0.12.0 (2018-04-21) Downloads Source code (zip) Source code (tar.gz) New Features A new feature_importance_permutation function to compute the feature importance in classifiers and regressors via the permutation importance method. ( #358 ) The fit method of the ExhaustiveFeatureSelector now optionally accepts **fit_params for the estimator that is used for the feature selection. ( #354 by Zach Griffith) The fit method of the SequentialFeatureSelector now optionally accepts **fit_params for the estimator that is used for the feature selection. ( #350 by Zach Griffith) Changes Replaced plot_decision_regions colors by a colorblind-friendly palette and added contour lines for decision regions. ( #348 ) All stacking estimators now raise a NotFittedError if any method for inference is called prior to fitting the estimator. ( #353 ) Renamed the refit parameter of both the StackingClassifier and StackingCVClassifier to use_clones to be more explicit and less misleading.
Bug Fixes Various changes in the documentation and documentation tools to fix formatting issues ( #363 ) Fixed a bug where the StackingCVClassifier 's meta features were not stored in the original order when shuffle=True ( #370 ) Many documentation improvements, including links to the User Guides in the API docs ( #371 ) Version 0.11.0 (2018-03-14) Downloads Source code (zip) Source code (tar.gz) New Features New function implementing the resampled paired t-test procedure ( paired_ttest_resampled ) to compare the performance of two models. ( #323 ) New function implementing the k-fold paired t-test procedure ( paired_ttest_kfold_cv ) to compare the performance of two models (also called k-hold-out paired t-test). ( #324 ) New function implementing the 5x2cv paired t-test procedure ( paired_ttest_5x2cv ) proposed by Dietterich (1998) to compare the performance of two models. ( #325 ) A refit parameter was added to stacking classes (similar to the refit parameter in the EnsembleVoteClassifier ), to support classifiers and regressors that follow the scikit-learn API but are not compatible with scikit-learn's clone function. ( #322 ) The ColumnSelector now has a drop_axis argument to use it in pipelines with CountVectorizers . ( #333 ) Changes Raises an informative error message if predict or predict_meta_features is called prior to calling the fit method in StackingRegressor and StackingCVRegressor . ( #315 ) The plot_decision_regions function now automatically determines the optimal setting based on the feature dimensions and supports anti-aliasing. The old res parameter has been deprecated. ( #309 by Guillaume Poirier-Morency ) Apriori code is faster due to optimization in onehot transformation and the amount of candidates generated by the apriori algorithm. ( #327 by Jakub Smid ) The OnehotTransactions class (which is typically used in combination with the apriori function for association rule mining) is now more memory efficient as it uses boolean arrays instead of integer arrays. In addition, the OnehotTransactions class can now be provided with a sparse argument to generate sparse representations of the onehot matrix to further improve memory efficiency. ( #328 by Jakub Smid ) The OnehotTransactions class has been deprecated and replaced by the TransactionEncoder . ( #332 ) The plot_decision_regions function now has three new parameters, scatter_kwargs , contourf_kwargs , and scatter_highlight_kwargs , that can be used to modify the plotting style. ( #342 by James Bourbeau ) Bug Fixes Fixed an issue when class labels were provided to the EnsembleVoteClassifier when refit was set to False . ( #322 ) Allow arrays with 16-bit and 32-bit precision in plot_decision_regions function. ( #337 ) Fixed a bug that raised an indexing error if the number of items was <= 1 when computing association rules using the conviction metric. ( #340 ) Version 0.10.0 (2017-12-22) Downloads Source code (zip) Source code (tar.gz) New Features New store_train_meta_features parameter for fit in StackingCVRegressor . If True, train meta-features are stored in self.train_meta_features_ . New pred_meta_features method for StackingCVRegressor . Test meta-features can be obtained using this method. ( #294 via takashioya )
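For the paired t-test procedures listed under 0.11.0 above, here is a minimal sketch of the 5x2cv variant; the two estimators and the dataset are placeholder choices:

```python
from mlxtend.data import iris_data
from mlxtend.evaluate import paired_ttest_5x2cv
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier

X, y = iris_data()

# compare two models via the 5x2cv paired t-test (Dietterich, 1998)
t, p = paired_ttest_5x2cv(estimator1=LogisticRegression(),
                          estimator2=DecisionTreeClassifier(),
                          X=X, y=y, random_seed=1)
print('t statistic: %.3f, p value: %.3f' % (t, p))
```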
The new store_train_meta_features attribute and pred_meta_features method for the StackingCVRegressor were also added to the StackingRegressor , StackingClassifier , and StackingCVClassifier ( #299 & #300 ) New function ( evaluate.mcnemar_tables ) for creating multiple 2x2 contingency tables from arrays of model predictions that can be used in multiple McNemar (post-hoc) tests or Cochran's Q or F tests, etc. ( #307 ) New function ( evaluate.cochrans_q ) for performing Cochran's Q test to compare the accuracy of multiple classifiers. ( #310 ) Changes Added requirements.txt to setup.py . ( #304 via Colin Carroll ) Bug Fixes Improved numerical stability for p-values computed via the exact McNemar test ( #306 ) nose is not required to use the library ( #302 ) Version 0.9.1 (2017-11-19) Downloads Source code (zip) Source code (tar.gz) New Features Added mlxtend.evaluate.bootstrap_point632_score to evaluate the performance of estimators using the .632 bootstrap. ( #283 ) New max_len parameter for the frequent itemset generation via the apriori function to allow for early stopping. ( #270 ) Changes All feature index tuples in SequentialFeatureSelector are now in sorted order. ( #262 ) The SequentialFeatureSelector now runs the continuation of the floating inclusion/exclusion as described in Novovicova & Kittler (1994). Note that this didn't cause any difference in performance on any of the test scenarios but could lead to better performance in certain edge cases. ( #262 ) utils.Counter now accepts a name variable to help distinguish between multiple counters, time precision can be set with the 'precision' kwarg and the new attribute end_time holds the time the last iteration completed. ( #278 via Mathew Savage ) Bug Fixes Fixed a deprecation error that occurred with the McNemar test when using SciPy 1.0. ( #283 ) Version 0.9.0 (2017-10-21) Downloads Source code (zip) Source code (tar.gz) New Features Added evaluate.permutation_test , a permutation test for hypothesis testing (or A/B testing) to test if two samples come from the same distribution. Or in other words, a procedure to test the null hypothesis that two groups are not significantly different (e.g., a treatment and a control group). ( #250 ) Added 'leverage' and 'conviction' as evaluation metrics to the frequent_patterns.association_rules function. ( #246 & #247 ) Added a loadings_ attribute to PrincipalComponentAnalysis to compute the factor loadings of the features on the principal components. ( #251 ) Allow grid search over classifiers/regressors in ensemble and stacking estimators. ( #259 ) New make_multiplexer_dataset function that creates a dataset generated by an n-bit Boolean multiplexer for evaluating supervised learning algorithms. ( #263 ) Added a new BootstrapOutOfBag class, an implementation of the out-of-bag bootstrap to evaluate supervised learning algorithms. ( #265 ) The parameters for StackingClassifier , StackingCVClassifier , StackingRegressor , StackingCVRegressor , and EnsembleVoteClassifier can now be tuned using scikit-learn's GridSearchCV ( #254 via James Bourbeau ) Changes The 'support' column returned by frequent_patterns.association_rules was changed to compute the support of \"antecedent union consequent\", and new 'antecedent support' and 'consequent support' columns were added to avoid ambiguity. ( #245 ) Allow the OnehotTransactions to be cloned via scikit-learn's clone function, which is required by e.g., scikit-learn's FeatureUnion or GridSearchCV (via Iaroslav Shcherbatyi ). ( #249 )
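To make the mcnemar_table / mcnemar workflow from the 0.10.0 notes concrete, here is a small sketch; the prediction arrays are made up:

```python
import numpy as np
from mlxtend.evaluate import mcnemar_table, mcnemar

y_true = np.array([0, 0, 1, 1, 1, 1, 0, 1, 0, 1])  # made-up labels
y_mod1 = np.array([0, 1, 1, 1, 1, 1, 0, 1, 0, 0])  # model 1 predictions
y_mod2 = np.array([0, 0, 1, 1, 0, 1, 1, 1, 0, 1])  # model 2 predictions

# 2x2 contingency table of the two models' correct/incorrect predictions
tb = mcnemar_table(y_target=y_true, y_model1=y_mod1, y_model2=y_mod2)
chi2, p = mcnemar(ary=tb, corrected=True)
print(tb, chi2, p)
```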
Bug Fixes Fix issues with self._init_time parameter in _IterativeModel subclasses. ( #256 ) Fix imprecision bug that occurred in plot_ecdf when run on Python 2.7. ( #264 ) The vectors from SVD in PrincipalComponentAnalysis are now being scaled so that the eigenvalues via solver='eigen' and solver='svd' now store eigenvalues that have the same magnitudes. ( #251 ) Version 0.8.0 (2017-09-09) Downloads Source code (zip) Source code (tar.gz) New Features Added a mlxtend.evaluate.bootstrap that implements the ordinary nonparametric bootstrap to bootstrap a single statistic (for example, the mean, median, R^2 of a regression fit, and so forth) #232 SequentialFeatureSelector 's k_features now accepts a string argument \"best\" or \"parsimonious\" for more \"automated\" feature selection. For instance, if \"best\" is provided, the feature selector will return the feature subset with the best cross-validation performance. If \"parsimonious\" is provided as an argument, the smallest feature subset that is within one standard error of the cross-validation performance will be selected. #238 Changes SequentialFeatureSelector now uses np.nanmean over normal mean to support scorers that may return np.nan #211 (via mrkaiser ) The skip_if_stuck parameter was removed from SequentialFeatureSelector in favor of a more efficient implementation comparing the conditional inclusion/exclusion results (in the floating versions) to the performances of previously sampled feature sets that were cached #237 ExhaustiveFeatureSelector was modified to consume substantially less memory #195 (via Adam Erickson ) Bug Fixes Fixed a bug where the SequentialFeatureSelector selected a feature subset larger than specified via the k_features tuple max-value #213 Version 0.7.0 (2017-06-22) Downloads Source code (zip) Source code (tar.gz) New Features New mlxtend.plotting.ecdf function for plotting empirical cumulative distribution functions ( #196 ). New StackingCVRegressor for stacking regressors with out-of-fold predictions to prevent overfitting ( #201 via Eike Dehling ). Changes The TensorFlow estimators have been removed from mlxtend, since TensorFlow now has very convenient ways to build estimators, which renders those implementations obsolete. plot_decision_regions now supports plotting decision regions for more than 2 training features ( #189 , via James Bourbeau ). Parallel execution in mlxtend.feature_selection.SequentialFeatureSelector and mlxtend.feature_selection.ExhaustiveFeatureSelector is now performed over different feature subsets instead of the different cross-validation folds to better utilize machines with multiple processors if the number of features is large ( #193 , via @whalebot-helmsman ). Raise meaningful error messages if pandas DataFrame s or Python lists of lists are fed into the StackingCVClassifier as fit arguments ( #198 ). The n_folds parameter of the StackingCVClassifier was changed to cv and can now accept any kind of cross validation technique that is available from scikit-learn. For example, StackingCVClassifier(..., cv=StratifiedKFold(n_splits=3)) or StackingCVClassifier(..., cv=GroupKFold(n_splits=3)) ( #203 , via Konstantinos Paliouras ). Bug Fixes SequentialFeatureSelector now correctly accepts a None argument for the scoring parameter to infer the default scoring metric from scikit-learn classifiers and regressors ( #171 ). The plot_decision_regions function now supports pre-existing axes objects generated via matplotlib's plt.subplots . ( #184 , see example )
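A short sketch of passing a pre-existing axes object to plot_decision_regions, as described above; the classifier and the choice of two features are arbitrary:

```python
import matplotlib.pyplot as plt
from mlxtend.data import iris_data
from mlxtend.plotting import plot_decision_regions
from sklearn.linear_model import LogisticRegression

X, y = iris_data()
X = X[:, [0, 2]]                      # two features for a 2D plot
clf = LogisticRegression().fit(X, y)

fig, ax = plt.subplots()              # pre-existing axes object
plot_decision_regions(X=X, y=y, clf=clf, ax=ax)
plt.show()
```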
Made math.num_combinations and math.num_permutations numerically stable for large numbers of combinations and permutations ( #200 ). Version 0.6.0 (2017-03-18) Downloads Source code (zip) Source code (tar.gz) New Features An association_rules function is implemented that allows generating rules based on a list of frequent itemsets (via Joshua Goerner ). Changes Adds a black edgecolor to plots via plotting.plot_decision_regions to make markers more distinguishable from the background in matplotlib>=2.0 . The association submodule was renamed to frequent_patterns . Bug Fixes The DataFrame index of apriori results is now unique and ordered. Fixed typos in autompg and wine datasets (via James Bourbeau ). Version 0.5.1 (2017-02-14) Downloads Source code (zip) Source code (tar.gz) New Features The EnsembleVoteClassifier has a new refit attribute that prevents refitting classifiers if refit=False to save computational time. Added a new lift_score function in evaluate to compute lift score (via Batuhan Bardak ). StackingClassifier and StackingRegressor support multivariate targets if the underlying models do (via kernc ). StackingClassifier has a new use_features_in_secondary attribute like StackingCVClassifier . Changes Changed default verbosity level in SequentialFeatureSelector to 0 The EnsembleVoteClassifier now raises a NotFittedError if the estimator wasn't fit before calling predict . (via Anton Loss ) Added new TensorFlow variable initialization syntax to guarantee compatibility with TensorFlow 1.0 Bug Fixes Fixed a wrong default value for k_features in SequentialFeatureSelector Cast selected feature subsets in the SequentialFeatureSelector as sets to prevent the iterator from getting stuck if the k_idx are different permutations of the same combination (via Zac Wellmer ). Fixed an issue with learning curves that caused the performance metrics to be reversed (via ipashchenko ) Fixed a bug that could occur in the SequentialFeatureSelector if there are similarly well-performing subsets in the floating variants (via Zac Wellmer ). Version 0.5.0 (2016-11-09) Downloads Source code (zip) Source code (tar.gz) New Features New ExhaustiveFeatureSelector estimator in mlxtend.feature_selection for evaluating all feature combinations in a specified range The StackingClassifier has a new parameter average_probas that is set to True by default to maintain the current behavior. A deprecation warning was added though, and it will default to False in future releases (0.6.0); average_probas=False will result in stacking of the level-1 predicted probabilities rather than averaging these.
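A minimal sketch of the ExhaustiveFeatureSelector introduced in 0.5.0; the estimator, feature range, and dataset are arbitrary illustration choices:

```python
from mlxtend.data import iris_data
from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS
from sklearn.neighbors import KNeighborsClassifier

X, y = iris_data()

# evaluate all feature combinations of size 1 to 3
efs = EFS(KNeighborsClassifier(n_neighbors=3),
          min_features=1, max_features=3,
          scoring='accuracy', cv=3)
efs = efs.fit(X, y)
print(efs.best_idx_, efs.best_score_)
```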
New StackingCVClassifier estimator in 'mlxtend.classifier' for implementing a stacking ensemble that uses cross-validation techniques for training the meta-estimator to avoid overfitting ( Reiichiro Nakano ) New OnehotTransactions encoder class added to the preprocessing submodule for transforming transaction data into a one-hot encoded array The SequentialFeatureSelector estimator in mlxtend.feature_selection is now safely stoppable mid-process via Control+C, and print_progress was deprecated in favor of a more tunable verbose parameter ( Will McGinnis ) New apriori function in association to extract frequent itemsets from transaction data for association rule mining New checkerboard_plot function in plotting to plot checkerboard tables / heat maps New mcnemar_table and mcnemar functions in evaluate to compute 2x2 contingency tables and McNemar's test Changes All plotting functions have been moved to mlxtend.plotting for compatibility reasons with continuous integration services and to make the installation of matplotlib optional for users of mlxtend 's core functionality Added a compatibility layer for scikit-learn 0.18 using the new model_selection module while maintaining backwards compatibility to scikit-learn 0.17. Bug Fixes mlxtend.plotting.plot_decision_regions now draws decision regions correctly if more than 4 class labels are present Raise AttributeError in plot_decision_regions when the X_highlight argument is a 1D array ( chkoar ) Version 0.4.2 (2016-08-24) Downloads Source code (zip) Source code (tar.gz) PDF documentation New Features Added preprocessing.CopyTransformer , a mock class that returns copies of input arrays via transform and fit_transform Changes Added AppVeyor to CI to ensure MS Windows compatibility Datasets are now saved as compressed .txt or .csv files rather than being imported as Python objects feature_selection.SequentialFeatureSelector now supports the selection of k_features using a tuple to specify a \"min-max\" k_features range Added \"SVD solver\" option to the PrincipalComponentAnalysis Raise an AttributeError with \"not fitted\" message in SequentialFeatureSelector if transform or get_metric_dict are called prior to fit Use small, positive bias units in TfMultiLayerPerceptron 's hidden layer(s) if the activations are ReLUs in order to avoid dead neurons Added an optional clone_estimator parameter to the SequentialFeatureSelector that defaults to True , avoiding the modification of the original estimator objects More rigorous type and shape checks in the evaluate.plot_decision_regions function DenseTransformer now doesn't raise an error if the input array is not sparse API clean-up using scikit-learn's BaseEstimator as parent class for feature_selection.ColumnSelector Bug Fixes Fixed a problem when a tuple-range was provided as argument to the SequentialFeatureSelector 's k_features parameter and the scoring metric was more negative than -1 (e.g., as in scikit-learn's MSE scoring function) (via wahutch ) Fixed an AttributeError issue when verbose > 1 in StackingClassifier Fixed a bug in classifier.SoftmaxRegression where the mean values of the offsets were used to update the bias units rather than their sum Fixed rare bug in MLP _layer_mapping functions that caused a swap between the random number generation seed when initializing weights and biases Version 0.4.1 (2016-05-01) Downloads Source code (zip) Source code (tar.gz) PDF documentation New Features New TensorFlow estimator for Linear Regression ( tf_regressor.TfLinearRegression
) New k-means clustering estimator ( cluster.Kmeans ) New TensorFlow k-means clustering estimator ( tf_cluster.Kmeans ) Changes Due to refactoring of the estimator classes, the init_weights parameter of the fit methods was globally renamed to init_params Overall performance improvements of estimators due to code clean-up and refactoring Added several additional checks for correct array types and more meaningful exception messages Added optional dropout to the tf_classifier.TfMultiLayerPerceptron classifier for regularization Added an optional decay parameter to the tf_classifier.TfMultiLayerPerceptron classifier for adaptive learning via an exponential decay of the learning rate eta Replaced the old NeuralNetMLP with the more streamlined MultiLayerPerceptron ( classifier.MultiLayerPerceptron ); now also with softmax in the output layer and categorical cross-entropy loss. Unified init_params parameter for fit functions to continue training where the algorithm left off (if supported) Version 0.4.0 (2016-04-09) New Features New TfSoftmaxRegression classifier using Tensorflow ( tf_classifier.TfSoftmaxRegression ) New SoftmaxRegression classifier ( classifier.SoftmaxRegression ) New TfMultiLayerPerceptron classifier using Tensorflow ( tf_classifier.TfMultiLayerPerceptron ) New StackingRegressor ( regressor.StackingRegressor ) New StackingClassifier ( classifier.StackingClassifier ) New function for one-hot encoding of class labels ( preprocessing.one_hot ) Added GridSearch support to the SequentialFeatureSelector ( feature_selection.SequentialFeatureSelector ) evaluate.plot_decision_regions improvements: Function now handles y class labels correctly if the array is of type float Correct handling of input arguments markers and colors Accept an existing Axes via the ax argument New print_progress parameter for all generalized models and multi-layer neural networks for printing time elapsed, ETA, and the current cost of the current epoch Minibatch learning for classifier.LogisticRegression , classifier.Adaline , and regressor.LinearRegression plus streamlined API New Principal Component Analysis class via mlxtend.feature_extraction.PrincipalComponentAnalysis New RBF Kernel Principal Component Analysis class via mlxtend.feature_extraction.RBFKernelPCA New Linear Discriminant Analysis class via mlxtend.feature_extraction.LinearDiscriminantAnalysis Changes The column parameter in mlxtend.preprocessing.standardize now defaults to None to standardize all columns more conveniently Version 0.3.0 (2016-01-31) Downloads Source code (zip) Source code (tar.gz) New Features Added a progress bar tracker to classifier.NeuralNetMLP Added a function to score predicted vs.
target class labels evaluate.scoring Added confusion matrix functions to create ( evaluate.confusion_matrix ) and plot ( evaluate.plot_confusion_matrix ) confusion matrices New style parameter and improved axis scaling in mlxtend.evaluate.plot_learning_curves Added loadlocal_mnist to mlxtend.data for streaming MNIST from local byte files into NumPy arrays New NeuralNetMLP parameters: random_weights , shuffle_init , shuffle_epoch New SFS features such as the generation of pandas DataFrame results tables and plotting functions (with confidence intervals, standard deviation, and standard error bars) Added support for regression estimators in SFS Added Boston housing dataset New shuffle parameter for classifier.NeuralNetMLP Changes The mlxtend.preprocessing.standardize function now optionally returns the parameters, which are estimated from the array, for re-use. A further improvement makes the standardize function smarter in order to avoid zero-division errors Cosmetic improvements to the evaluate.plot_decision_regions function such as hiding plot axes Renaming of classifier.EnsembleClassifier to classifier.EnsembleVoteClassifier Improved random weight initialization in Perceptron , Adaline , LinearRegression , and LogisticRegression Changed learning parameter of mlxtend.classifier.Adaline to solver and added \"normal equation\" as closed-form solution solver Hide y-axis labels in mlxtend.evaluate.plot_decision_regions in 1 dimensional evaluations Sequential Feature Selection algorithms were unified into a single SequentialFeatureSelector class with parameters to enable floating selection and toggle between forward and backward selection. Stratified sampling of MNIST (now 500x random samples from each of the 10 digit categories) Renaming mlxtend.plotting to mlxtend.general_plotting in order to distinguish general plotting functions from specialized utility functions such as evaluate.plot_decision_regions Version 0.2.9 (2015-07-14) Downloads Source code (zip) Source code (tar.gz) New Features Sequential Feature Selection algorithms: SFS, SFFS, SBS, and SFBS Changes Changed regularization & lambda parameters in LogisticRegression to single parameter l2_lambda Version 0.2.8 (2015-06-27) API changes: mlxtend.sklearn.EnsembleClassifier -> mlxtend.classifier.EnsembleClassifier mlxtend.sklearn.ColumnSelector -> mlxtend.feature_selection.ColumnSelector mlxtend.sklearn.DenseTransformer -> mlxtend.preprocessing.DenseTransformer mlxtend.pandas.standardizing -> mlxtend.preprocessing.standardizing mlxtend.pandas.minmax_scaling -> mlxtend.preprocessing.minmax_scaling mlxtend.matplotlib -> mlxtend.plotting Added momentum learning parameter (alpha coefficient) to mlxtend.classifier.NeuralNetMLP . Added adaptive learning rate (decrease constant) to mlxtend.classifier.NeuralNetMLP . mlxtend.pandas.minmax_scaling became mlxtend.preprocessing.minmax_scaling and also supports NumPy arrays now mlxtend.pandas.standardizing became mlxtend.preprocessing.standardizing and now supports both NumPy arrays and pandas DataFrames; also, a ddof parameter was added to set the degrees of freedom when calculating the standard deviation Version 0.2.7 (2015-06-20) Added multilayer perceptron (feedforward artificial neural network) classifier as mlxtend.classifier.NeuralNetMLP .
Added 5000 labeled training samples from the MNIST handwritten digits dataset to mlxtend.data Version 0.2.6 (2015-05-08) Added ordinary least squares regression using different solvers (gradient descent, stochastic gradient descent, and the closed-form solution (normal equation)) Added option for random weight initialization to logistic regression classifier and updated l2 regularization Added wine dataset to mlxtend.data Added invert_axes parameter to mlxtend.matplotlib.enrichment_plot to optionally plot the \"Count\" on the x-axis New verbose parameter for mlxtend.sklearn.EnsembleClassifier by Alejandro C. Bahnsen Added mlxtend.pandas.standardizing to standardize columns in a Pandas DataFrame Added parameters linestyles and markers to mlxtend.matplotlib.enrichment_plot mlxtend.regression.lin_regplot automatically adds np.newaxis and works with Python lists Added tokenizers: mlxtend.text.extract_emoticons and mlxtend.text.extract_words_and_emoticons Version 0.2.5 (2015-04-17) Added Sequential Backward Selection (mlxtend.sklearn.SBS) Added X_highlight parameter to mlxtend.evaluate.plot_decision_regions for highlighting test data points. Added mlxtend.regression.lin_regplot to plot the fitted line from linear regression. Added mlxtend.matplotlib.stacked_barplot to conveniently produce stacked barplots using pandas DataFrame s. Added mlxtend.matplotlib.enrichment_plot Version 0.2.4 (2015-03-15) Added scoring to mlxtend.evaluate.learning_curves (by user pfsq) Fixed setup.py bug caused by the missing README.html file Added matplotlib.category_scatter for pandas DataFrames and NumPy arrays Version 0.2.3 (2015-03-11) Added Logistic regression Gradient descent and stochastic gradient descent perceptron was changed to Adaline (Adaptive Linear Neuron) Perceptron and Adaline for {0, 1} classes Added mlxtend.preprocessing.shuffle_arrays_unison function to shuffle one or more NumPy arrays. Added shuffle and random seed parameter to stochastic gradient descent classifier. Added rstrip parameter to mlxtend.file_io.find_filegroups to allow trimming of base names. Added ignore_substring parameter to mlxtend.file_io.find_filegroups and find_files . Replaced .rstrip in mlxtend.file_io.find_filegroups with more robust regex. Gridsearch support for mlxtend.sklearn.EnsembleClassifier Version 0.2.2 (2015-03-01) Improved robustness of EnsembleClassifier. Extended plot_decision_regions() functionality for plotting 1D decision boundaries. Function matplotlib.plot_decision_regions was reorganized to evaluate.plot_decision_regions . evaluate.plot_learning_curves() function added. Added Rosenblatt, gradient descent, and stochastic gradient descent perceptrons. Version 0.2.1 (2015-01-20) Added mlxtend.pandas.minmax_scaling - a function to rescale pandas DataFrame columns. Slight update to the EnsembleClassifier interface (additional voting parameter) Fixed EnsembleClassifier to return correct class labels if class labels are not integers from 0 to n. Added new matplotlib function to plot decision regions of classifiers. Version 0.2.0 (2015-01-13) Improved mlxtend.text.generalize_duplcheck to remove duplicates and prevent an endless looping issue. Added recursive search parameter to mlxtend.file_io.find_files. Added check_ext parameter to mlxtend.file_io.find_files to search based on file extensions. Default parameter to ignore invisible files for mlxtend.file_io.find. Added transform and fit_transform to the EnsembleClassifier . Added mlxtend.file_io.find_filegroups function.
Version 0.1.9 (2015-01-10) Implemented scikit-learn EnsembleClassifier (majority voting rule) class. Version 0.1.8 (2015-01-07) Improvements to mlxtend.text.generalize_names to handle certain Dutch last name prefixes (van, van der, de, etc.). Added mlxtend.text.generalize_name_duplcheck function to apply mlxtend.text.generalize_names function to a pandas DataFrame without creating duplicates. Version 0.1.7 (2015-01-07) Added text utilities with name generalization function. Added file_io utilities. Version 0.1.6 (2015-01-04) Added combinations and permutations estimators. Version 0.1.5 (2014-12-11) Added DenseTransformer for pipelines and grid search. Version 0.1.4 (2014-08-20) mean_centering function is now a class that creates MeanCenterer objects that can be used to fit data via the fit method, and center data at the column means via the transform and fit_transform method. Version 0.1.3 (2014-08-19) Added preprocessing module and mean_centering function. Version 0.1.2 (2014-08-19) Added matplotlib utilities and remove_borders function. Version 0.1.1 (2014-08-13) Simplified code for ColumnSelector.","title":"Release Notes"},{"location":"CHANGELOG/#release-notes","text":"The CHANGELOG for the current development version is available at https://github.com/rasbt/mlxtend/blob/master/docs/sources/CHANGELOG.md .","title":"Release Notes"},{"location":"CHANGELOG/#version-0140-11-09-2018","text":"","title":"Version 0.14.0 (11-09-2018)"},{"location":"CHANGELOG/#downloads","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features","text":"Added a scatterplotmatrix function to the plotting module. ( #437 ) Added sample_weight option to StackingRegressor , StackingClassifier , StackingCVRegressor , StackingCVClassifier , EnsembleVoteClassifier . ( #438 ) Added a RandomHoldoutSplit class to perform a random train/valid split without rotation in SequentialFeatureSelector , scikit-learn GridSearchCV etc. ( #442 ) Added a PredefinedHoldoutSplit class to perform a train/valid split, based on user-specified indices, without rotation in SequentialFeatureSelector , scikit-learn GridSearchCV etc. ( #443 ) Created a new mlxtend.image submodule for working on image processing-related tasks. ( #457 ) Added a new convenience function extract_face_landmarks based on dlib to mlxtend.image . ( #458 ) Added a method='oob' option to the mlxtend.evaluate.bootstrap_point632_score function to compute the classic out-of-bag bootstrap estimate ( #459 ) Added a method='.632+' option to the mlxtend.evaluate.bootstrap_point632_score function to compute the .632+ bootstrap estimate that addresses the optimism bias of the .632 bootstrap ( #459 ) Added a new mlxtend.evaluate.ftest function to perform an F-test for comparing the accuracies of two or more classification models. ( #460 ) Added a new mlxtend.evaluate.combined_ftest_5x2cv function to perform a combined 5x2cv F-test for comparing the performance of two models. ( #461 ) Added a new mlxtend.evaluate.proportion_difference test for comparing two proportions (e.g., classifier accuracies) ( #462 )","title":"New Features"},{"location":"CHANGELOG/#changes","text":"Addressed deprecation warnings in NumPy 1.15. ( #425 ) Because of complications in PR ( #459 ), support for Python 2.7 was dropped; since official support for Python 2.7 by the Python Software Foundation is ending in approximately
12 months anyway, this refocusing will hopefully free up some developer time that would otherwise go into maintaining backward compatibility","title":"Changes"},{"location":"CHANGELOG/#bug-fixes","text":"Fixed an issue with a missing import in mlxtend.plotting.plot_confusion_matrix . ( #428 )","title":"Bug Fixes"},{"location":"CHANGELOG/#version-0130-2018-07-20","text":"","title":"Version 0.13.0 (2018-07-20)"},{"location":"CHANGELOG/#downloads_1","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_1","text":"A meaningful error message is now raised when a cross-validation generator is used with SequentialFeatureSelector . ( #377 ) The SequentialFeatureSelector now accepts custom feature names via the fit method for more interpretable feature subset reports. ( #379 ) The SequentialFeatureSelector is now also compatible with Pandas DataFrames and uses DataFrame column names for more interpretable feature subset reports. ( #379 ) ColumnSelector now works with Pandas DataFrame columns. ( #378 by Manuel Garrido ) The ExhaustiveFeatureSelector estimator in mlxtend.feature_selection is now safely stoppable mid-process via Control+C. ( #380 ) Two new functions, vectorspace_orthonormalization and vectorspace_dimensionality , were added to mlxtend.math to use the Gram-Schmidt process to convert a set of linearly independent vectors into a set of orthonormal basis vectors, and to compute the dimensionality of a vectorspace, respectively. ( #382 ) mlxtend.frequent_patterns.apriori now supports pandas SparseDataFrame s to generate frequent itemsets. ( #404 via Daniel Morales ) The plot_confusion_matrix function now has the ability to show normalized confusion matrix coefficients in addition to or instead of absolute confusion matrix coefficients with or without a colorbar. The text display method has been changed so that the full range of the colormap is used. The default size is also now set based on the number of classes. Added support for merging the meta features with the original input features in StackingRegressor (via use_features_in_secondary ) like it is already supported in the other Stacking classes. ( #418 ) Added a support_only parameter to the association_rules function, which allows constructing association rules (based on the support metric only) for cropped input DataFrames that don't contain a complete set of antecedent and consequent support values. ( #421 )","title":"New Features"},{"location":"CHANGELOG/#changes_1","text":"Itemsets generated with apriori are now frozenset s ( #393 by William Laney and #394 ) Now raises an error if an input DataFrame to apriori contains values other than 0, 1, True, and False. ( #419 )","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_1","text":"Allow mlxtend estimators to be cloned via scikit-learn's clone function. ( #374 )
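As a rough sketch of the support_only option described in the 0.13.0 notes above, with made-up transactions encoded via TransactionEncoder:

```python
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules

dataset = [['milk', 'bread'],          # made-up transactions
           ['milk', 'eggs'],
           ['bread', 'eggs', 'milk']]

te = TransactionEncoder()
df = pd.DataFrame(te.fit(dataset).transform(dataset), columns=te.columns_)

freq = apriori(df, min_support=0.5, use_colnames=True)

# rules based on the support metric only; other metrics stay undefined
rules = association_rules(freq, metric='support',
                          min_threshold=0.5, support_only=True)
print(rules[['antecedents', 'consequents', 'support']])
```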
Fixed a bug to allow the correct use of refit=False in StackingRegressor and StackingCVRegressor ( #384 and #385 by selay01 ) Allow StackingClassifier to work with sparse matrices when use_features_in_secondary=True ( #408 by Floris Hoogenbook ) Allow StackingCVRegressor to work with sparse matrices when use_features_in_secondary=True ( #416 ) Allow StackingCVClassifier to work with sparse matrices when use_features_in_secondary=True ( #417 )","title":"Bug Fixes"},{"location":"CHANGELOG/#version-0120-2018-21-04","text":"","title":"Version 0.12.0 (2018-04-21)"},{"location":"CHANGELOG/#downloads_2","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_2","text":"A new feature_importance_permutation function to compute the feature importance in classifiers and regressors via the permutation importance method ( #358 ) The fit method of the ExhaustiveFeatureSelector now optionally accepts **fit_params for the estimator that is used for the feature selection. ( #354 by Zach Griffith) The fit method of the SequentialFeatureSelector now optionally accepts **fit_params for the estimator that is used for the feature selection. ( #350 by Zach Griffith)","title":"New Features"},{"location":"CHANGELOG/#changes_2","text":"Replaced plot_decision_regions colors by a colorblind-friendly palette and added contour lines for decision regions. ( #348 ) All stacking estimators now raise NotFittedErrors if any method for inference is called prior to fitting the estimator. ( #353 ) Renamed the refit parameter of both the StackingClassifier and StackingCVClassifier to use_clones to be more explicit and less misleading. ( #368 )","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_2","text":"Various changes in the documentation and documentation tools to fix formatting issues ( #363 ) Fixed a bug where the StackingCVClassifier 's meta features were not stored in the original order when shuffle=True ( #370 ) Many documentation improvements, including links to the User Guides in the API docs ( #371 )","title":"Bug Fixes"},{"location":"CHANGELOG/#version-0110-2018-03-14","text":"","title":"Version 0.11.0 (2018-03-14)"},{"location":"CHANGELOG/#downloads_3","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_3","text":"New function implementing the resampled paired t-test procedure ( paired_ttest_resampled ) to compare the performance of two models. ( #323 ) New function implementing the k-fold paired t-test procedure ( paired_ttest_kfold_cv ) to compare the performance of two models (also called k-hold-out paired t-test). ( #324 ) New function implementing the 5x2cv paired t-test procedure ( paired_ttest_5x2cv ) proposed by Dietterich (1998) to compare the performance of two models. ( #325 ) A refit parameter was added to stacking classes (similar to the refit parameter in the EnsembleVoteClassifier ), to support classifiers and regressors that follow the scikit-learn API but are not compatible with scikit-learn's clone function. ( #322 ) The ColumnSelector now has a drop_axis argument to use it in pipelines with CountVectorizers . ( #333 )","title":"New Features"},{"location":"CHANGELOG/#changes_3","text":"Raises an informative error message if predict or predict_meta_features is called prior to calling the fit method in StackingRegressor and StackingCVRegressor . ( #315 )
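A minimal sketch of the feature_importance_permutation function from the 0.12.0 notes; the model and the number of permutation rounds are arbitrary:

```python
from mlxtend.data import iris_data
from mlxtend.evaluate import feature_importance_permutation
from sklearn.ensemble import RandomForestClassifier

X, y = iris_data()
model = RandomForestClassifier(n_estimators=50, random_state=1).fit(X, y)

# mean importance per feature, averaged over 10 permutation rounds
imp_vals, _ = feature_importance_permutation(
    predict_method=model.predict, X=X, y=y,
    metric='accuracy', num_rounds=10, seed=1)
print(imp_vals)
```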
The plot_decision_regions function now automatically determines the optimal setting based on the feature dimensions and supports anti-aliasing. The old res parameter has been deprecated. ( #309 by Guillaume Poirier-Morency ) Apriori code is faster due to optimization in onehot transformation and the amount of candidates generated by the apriori algorithm. ( #327 by Jakub Smid ) The OnehotTransactions class (which is typically used in combination with the apriori function for association rule mining) is now more memory efficient as it uses boolean arrays instead of integer arrays. In addition, the OnehotTransactions class can now be provided with a sparse argument to generate sparse representations of the onehot matrix to further improve memory efficiency. ( #328 by Jakub Smid ) The OnehotTransactions class has been deprecated and replaced by the TransactionEncoder . ( #332 ) The plot_decision_regions function now has three new parameters, scatter_kwargs , contourf_kwargs , and scatter_highlight_kwargs , that can be used to modify the plotting style. ( #342 by James Bourbeau )","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_3","text":"Fixed an issue when class labels were provided to the EnsembleVoteClassifier when refit was set to False . ( #322 ) Allow arrays with 16-bit and 32-bit precision in plot_decision_regions function. ( #337 ) Fixed a bug that raised an indexing error if the number of items was <= 1 when computing association rules using the conviction metric. ( #340 )","title":"Bug Fixes"},{"location":"CHANGELOG/#version-0100-2017-12-22","text":"","title":"Version 0.10.0 (2017-12-22)"},{"location":"CHANGELOG/#downloads_4","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_4","text":"New store_train_meta_features parameter for fit in StackingCVRegressor . If True, train meta-features are stored in self.train_meta_features_ . New pred_meta_features method for StackingCVRegressor . Test meta-features can be obtained using this method. ( #294 via takashioya ) The new store_train_meta_features attribute and pred_meta_features method for the StackingCVRegressor were also added to the StackingRegressor , StackingClassifier , and StackingCVClassifier ( #299 & #300 ) New function ( evaluate.mcnemar_tables ) for creating multiple 2x2 contingency tables from arrays of model predictions that can be used in multiple McNemar (post-hoc) tests or Cochran's Q or F tests, etc. ( #307 ) New function ( evaluate.cochrans_q ) for performing Cochran's Q test to compare the accuracy of multiple classifiers. ( #310 )","title":"New Features"},{"location":"CHANGELOG/#changes_4","text":"Added requirements.txt to setup.py . ( #304 via Colin Carroll )","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_4","text":"Improved numerical stability for p-values computed via the exact McNemar test ( #306 ) nose is not required to use the library ( #302 )","title":"Bug Fixes"},{"location":"CHANGELOG/#version-091-2017-11-19","text":"","title":"Version 0.9.1 (2017-11-19)"},{"location":"CHANGELOG/#downloads_5","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_5","text":"Added mlxtend.evaluate.bootstrap_point632_score to evaluate the performance of estimators using the .632 bootstrap. ( #283 ) New max_len parameter for the frequent itemset generation via the apriori function to allow for early stopping. ( #270 )
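A short sketch of bootstrap_point632_score from the 0.9.1 notes; the estimator and the number of splits are illustration choices:

```python
import numpy as np
from mlxtend.data import iris_data
from mlxtend.evaluate import bootstrap_point632_score
from sklearn.tree import DecisionTreeClassifier

X, y = iris_data()

# .632 bootstrap accuracy estimated over 200 bootstrap splits
scores = bootstrap_point632_score(DecisionTreeClassifier(random_state=1),
                                  X, y, n_splits=200, method='.632')
print('Mean accuracy: %.3f' % np.mean(scores))
```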
","title":"New Features"},{"location":"CHANGELOG/#changes_5","text":"All feature index tuples in SequentialFeatureSelector are now in sorted order. ( #262 ) The SequentialFeatureSelector now runs the continuation of the floating inclusion/exclusion as described in Novovicova & Kittler (1994). Note that this didn't cause any difference in performance on any of the test scenarios but could lead to better performance in certain edge cases. ( #262 ) utils.Counter now accepts a name variable to help distinguish between multiple counters, time precision can be set with the 'precision' kwarg and the new attribute end_time holds the time the last iteration completed. ( #278 via Mathew Savage )","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_5","text":"Fixed a deprecation error that occurred with the McNemar test when using SciPy 1.0. ( #283 )","title":"Bug Fixes"},{"location":"CHANGELOG/#version-090-2017-10-21","text":"","title":"Version 0.9.0 (2017-10-21)"},{"location":"CHANGELOG/#downloads_6","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_6","text":"Added evaluate.permutation_test , a permutation test for hypothesis testing (or A/B testing) to test if two samples come from the same distribution. Or in other words, a procedure to test the null hypothesis that two groups are not significantly different (e.g., a treatment and a control group). ( #250 ) Added 'leverage' and 'conviction' as evaluation metrics to the frequent_patterns.association_rules function. ( #246 & #247 ) Added a loadings_ attribute to PrincipalComponentAnalysis to compute the factor loadings of the features on the principal components. ( #251 ) Allow grid search over classifiers/regressors in ensemble and stacking estimators. ( #259 ) New make_multiplexer_dataset function that creates a dataset generated by an n-bit Boolean multiplexer for evaluating supervised learning algorithms. ( #263 ) Added a new BootstrapOutOfBag class, an implementation of the out-of-bag bootstrap to evaluate supervised learning algorithms. ( #265 ) The parameters for StackingClassifier , StackingCVClassifier , StackingRegressor , StackingCVRegressor , and EnsembleVoteClassifier can now be tuned using scikit-learn's GridSearchCV ( #254 via James Bourbeau )","title":"New Features"},{"location":"CHANGELOG/#changes_6","text":"The 'support' column returned by frequent_patterns.association_rules was changed to compute the support of \"antecedent union consequent\", and new 'antecedent support' and 'consequent support' columns were added to avoid ambiguity. ( #245 ) Allow the OnehotTransactions to be cloned via scikit-learn's clone function, which is required by e.g., scikit-learn's FeatureUnion or GridSearchCV (via Iaroslav Shcherbatyi ). ( #249 )","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_6","text":"Fix issues with self._init_time parameter in _IterativeModel subclasses. ( #256 ) Fix imprecision bug that occurred in plot_ecdf when run on Python 2.7. ( #264 ) The vectors from SVD in PrincipalComponentAnalysis are now being scaled so that the eigenvalues via solver='eigen' and solver='svd' now store eigenvalues that have the same magnitudes. ( #251 )
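To illustrate the PrincipalComponentAnalysis additions above (the loadings_ attribute and the matched eigen/svd solvers), a minimal sketch; the attribute names follow the mlxtend docs for this release and should be treated as assumptions:

```python
from mlxtend.data import iris_data
from mlxtend.feature_extraction import PrincipalComponentAnalysis

X, y = iris_data()

# solver='eigen' and solver='svd' now yield eigenvalues of equal magnitude
pca = PrincipalComponentAnalysis(n_components=2, solver='svd')
pca.fit(X)
X_pca = pca.transform(X)
print(pca.e_vals_)    # eigenvalues (documented attribute name)
print(pca.loadings_)  # factor loadings added in 0.9.0
```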
","title":"Bug Fixes"},{"location":"CHANGELOG/#version-080-2017-09-09","text":"","title":"Version 0.8.0 (2017-09-09)"},{"location":"CHANGELOG/#downloads_7","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_7","text":"Added a mlxtend.evaluate.bootstrap that implements the ordinary nonparametric bootstrap to bootstrap a single statistic (for example, the mean, median, R^2 of a regression fit, and so forth) #232 SequentialFeatureSelector 's k_features now accepts a string argument \"best\" or \"parsimonious\" for more \"automated\" feature selection. For instance, if \"best\" is provided, the feature selector will return the feature subset with the best cross-validation performance. If \"parsimonious\" is provided as an argument, the smallest feature subset that is within one standard error of the cross-validation performance will be selected. #238","title":"New Features"},{"location":"CHANGELOG/#changes_7","text":"SequentialFeatureSelector now uses np.nanmean over normal mean to support scorers that may return np.nan #211 (via mrkaiser ) The skip_if_stuck parameter was removed from SequentialFeatureSelector in favor of a more efficient implementation comparing the conditional inclusion/exclusion results (in the floating versions) to the performances of previously sampled feature sets that were cached #237 ExhaustiveFeatureSelector was modified to consume substantially less memory #195 (via Adam Erickson )","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_7","text":"Fixed a bug where the SequentialFeatureSelector selected a feature subset larger than specified via the k_features tuple max-value #213","title":"Bug Fixes"},{"location":"CHANGELOG/#version-070-2017-06-22","text":"","title":"Version 0.7.0 (2017-06-22)"},{"location":"CHANGELOG/#downloads_8","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_8","text":"New mlxtend.plotting.ecdf function for plotting empirical cumulative distribution functions ( #196 ). New StackingCVRegressor for stacking regressors with out-of-fold predictions to prevent overfitting ( #201 via Eike Dehling ).","title":"New Features"},{"location":"CHANGELOG/#changes_8","text":"The TensorFlow estimators have been removed from mlxtend, since TensorFlow now has very convenient ways to build estimators, which renders those implementations obsolete. plot_decision_regions now supports plotting decision regions for more than 2 training features ( #189 , via James Bourbeau ). Parallel execution in mlxtend.feature_selection.SequentialFeatureSelector and mlxtend.feature_selection.ExhaustiveFeatureSelector is now performed over different feature subsets instead of the different cross-validation folds to better utilize machines with multiple processors if the number of features is large ( #193 , via @whalebot-helmsman ). Raise meaningful error messages if pandas DataFrame s or Python lists of lists are fed into the StackingCVClassifier as fit arguments ( #198 ). The n_folds parameter of the StackingCVClassifier was changed to cv and can now accept any kind of cross validation technique that is available from scikit-learn. For example, StackingCVClassifier(..., cv=StratifiedKFold(n_splits=3)) or StackingCVClassifier(..., cv=GroupKFold(n_splits=3)) ( #203 , via Konstantinos Paliouras ).
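A minimal sketch of the string arguments for k_features described in the 0.8.0 notes; the estimator and cv settings are arbitrary:

```python
from mlxtend.data import iris_data
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.neighbors import KNeighborsClassifier

X, y = iris_data()

# 'parsimonious' selects the smallest subset within one standard error
# of the best cross-validation performance; 'best' is the other option
sfs = SFS(KNeighborsClassifier(n_neighbors=3),
          k_features='parsimonious',
          forward=True, scoring='accuracy', cv=5)
sfs = sfs.fit(X, y)
print(sfs.k_feature_idx_, sfs.k_score_)
```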
","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_8","text":"SequentialFeatureSelector now correctly accepts a None argument for the scoring parameter to infer the default scoring metric from scikit-learn classifiers and regressors ( #171 ). The plot_decision_regions function now supports pre-existing axes objects generated via matplotlib's plt.subplots . ( #184 , see example ) Made math.num_combinations and math.num_permutations numerically stable for large numbers of combinations and permutations ( #200 ).","title":"Bug Fixes"},{"location":"CHANGELOG/#version-060-2017-03-18","text":"","title":"Version 0.6.0 (2017-03-18)"},{"location":"CHANGELOG/#downloads_9","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_9","text":"An association_rules function is implemented that allows generating rules based on a list of frequent itemsets (via Joshua Goerner ).","title":"New Features"},{"location":"CHANGELOG/#changes_9","text":"Adds a black edgecolor to plots via plotting.plot_decision_regions to make markers more distinguishable from the background in matplotlib>=2.0 . The association submodule was renamed to frequent_patterns .","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_9","text":"The DataFrame index of apriori results is now unique and ordered. Fixed typos in autompg and wine datasets (via James Bourbeau ).","title":"Bug Fixes"},{"location":"CHANGELOG/#version-051-2017-02-14","text":"","title":"Version 0.5.1 (2017-02-14)"},{"location":"CHANGELOG/#downloads_10","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_10","text":"The EnsembleVoteClassifier has a new refit attribute that prevents refitting classifiers if refit=False to save computational time. Added a new lift_score function in evaluate to compute lift score (via Batuhan Bardak ). StackingClassifier and StackingRegressor support multivariate targets if the underlying models do (via kernc ). StackingClassifier has a new use_features_in_secondary attribute like StackingCVClassifier .","title":"New Features"},{"location":"CHANGELOG/#changes_10","text":"Changed default verbosity level in SequentialFeatureSelector to 0 The EnsembleVoteClassifier now raises a NotFittedError if the estimator wasn't fit before calling predict . (via Anton Loss ) Added new TensorFlow variable initialization syntax to guarantee compatibility with TensorFlow 1.0","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_10","text":"Fixed a wrong default value for k_features in SequentialFeatureSelector Cast selected feature subsets in the SequentialFeatureSelector as sets to prevent the iterator from getting stuck if the k_idx are different permutations of the same combination (via Zac Wellmer ).
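A tiny sketch of the lift_score function added in 0.5.1; the label arrays are made up:

```python
import numpy as np
from mlxtend.evaluate import lift_score

y_true = np.array([1, 0, 1, 1, 0, 1, 0, 0, 1, 1])  # made-up targets
y_pred = np.array([1, 0, 1, 0, 0, 1, 1, 0, 1, 1])  # made-up predictions

# lift = precision divided by the prevalence of the positive class
print(lift_score(y_true, y_pred))
```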
Fixed an issue with learning curves that caused the performance metrics to be reversed (via ipashchenko ) Fixed a bug that could occur in the SequentialFeatureSelector if there are similarly well-performing subsets in the floating variants (via Zac Wellmer ).","title":"Bug Fixes"},{"location":"CHANGELOG/#version-050-2016-11-09","text":"","title":"Version 0.5.0 (2016-11-09)"},{"location":"CHANGELOG/#downloads_11","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_11","text":"New ExhaustiveFeatureSelector estimator in mlxtend.feature_selection for evaluating all feature combinations in a specified range The StackingClassifier has a new parameter average_probas that is set to True by default to maintain the current behavior. A deprecation warning was added though, and it will default to False in future releases (0.6.0); average_probas=False will result in stacking of the level-1 predicted probabilities rather than averaging these. New StackingCVClassifier estimator in 'mlxtend.classifier' for implementing a stacking ensemble that uses cross-validation techniques for training the meta-estimator to avoid overfitting ( Reiichiro Nakano ) New OnehotTransactions encoder class added to the preprocessing submodule for transforming transaction data into a one-hot encoded array The SequentialFeatureSelector estimator in mlxtend.feature_selection is now safely stoppable mid-process via Control+C, and print_progress was deprecated in favor of a more tunable verbose parameter ( Will McGinnis ) New apriori function in association to extract frequent itemsets from transaction data for association rule mining New checkerboard_plot function in plotting to plot checkerboard tables / heat maps New mcnemar_table and mcnemar functions in evaluate to compute 2x2 contingency tables and McNemar's test","title":"New Features"},{"location":"CHANGELOG/#changes_11","text":"All plotting functions have been moved to mlxtend.plotting for compatibility reasons with continuous integration services and to make the installation of matplotlib optional for users of mlxtend 's core functionality Added a compatibility layer for scikit-learn 0.18 using the new model_selection module while maintaining backwards compatibility to scikit-learn 0.17.","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_11","text":"mlxtend.plotting.plot_decision_regions now draws decision regions correctly if more than 4 class labels are present Raise AttributeError in plot_decision_regions when the X_highlight argument is a 1D array ( chkoar )","title":"Bug Fixes"},{"location":"CHANGELOG/#version-042-2016-08-24","text":"","title":"Version 0.4.2 (2016-08-24)"},{"location":"CHANGELOG/#downloads_12","text":"Source code (zip) Source code (tar.gz) PDF documentation","title":"Downloads"},{"location":"CHANGELOG/#new-features_12","text":"Added preprocessing.CopyTransformer , a mock class that returns copies of input arrays via transform and fit_transform","title":"New Features"},{"location":"CHANGELOG/#changes_12","text":"Added AppVeyor to CI to ensure MS Windows compatibility Datasets are now saved as compressed .txt or .csv files rather than being imported as Python objects feature_selection.SequentialFeatureSelector now supports the selection of k_features using a tuple to specify a \"min-max\" k_features range Added \"SVD solver\" option to the PrincipalComponentAnalysis Raise an AttributeError with \"not fitted\" message in SequentialFeatureSelector if transform or get_metric_dict are called prior to fit Use
small, positive bias units in TfMultiLayerPerceptron 's hidden layer(s) if the activations are ReLUs in order to avoid dead neurons Added an optional clone_estimator parameter to the SequentialFeatureSelector that defaults to True , avoiding the modification of the original estimator objects More rigorous type and shape checks in the evaluate.plot_decision_regions function DenseTransformer now doesn't raise an error if the input array is not sparse API clean-up using scikit-learn's BaseEstimator as parent class for feature_selection.ColumnSelector","title":"Changes"},{"location":"CHANGELOG/#bug-fixes_12","text":"Fixed a problem when a tuple-range was provided as argument to the SequentialFeatureSelector 's k_features parameter and the scoring metric was more negative than -1 (e.g., as in scikit-learn's MSE scoring function) (via wahutch ) Fixed an AttributeError issue when verbose > 1 in StackingClassifier Fixed a bug in classifier.SoftmaxRegression where the mean values of the offsets were used to update the bias units rather than their sum Fixed rare bug in MLP _layer_mapping functions that caused a swap between the random number generation seed when initializing weights and biases","title":"Bug Fixes"},{"location":"CHANGELOG/#version-041-2016-05-01","text":"","title":"Version 0.4.1 (2016-05-01)"},{"location":"CHANGELOG/#downloads_13","text":"Source code (zip) Source code (tar.gz) PDF documentation","title":"Downloads"},{"location":"CHANGELOG/#new-features_13","text":"New TensorFlow estimator for Linear Regression ( tf_regressor.TfLinearRegression ) New k-means clustering estimator ( cluster.Kmeans ) New TensorFlow k-means clustering estimator ( tf_cluster.Kmeans )","title":"New Features"},{"location":"CHANGELOG/#changes_13","text":"Due to refactoring of the estimator classes, the init_weights parameter of the fit methods was globally renamed to init_params Overall performance improvements of estimators due to code clean-up and refactoring Added several additional checks for correct array types and more meaningful exception messages Added optional dropout to the tf_classifier.TfMultiLayerPerceptron classifier for regularization Added an optional decay parameter to the tf_classifier.TfMultiLayerPerceptron classifier for adaptive learning via an exponential decay of the learning rate eta Replaced the old NeuralNetMLP with the more streamlined MultiLayerPerceptron ( classifier.MultiLayerPerceptron ); now also with softmax in the output layer and categorical cross-entropy loss.
Unified init_params parameter for fit functions to continue training where the algorithm left off (if supported)","title":"Changes"},{"location":"CHANGELOG/#version-040-2016-04-09","text":"","title":"Version 0.4.0 (2016-04-09)"},{"location":"CHANGELOG/#new-features_14","text":"New TfSoftmaxRegression classifier using Tensorflow ( tf_classifier.TfSoftmaxRegression ) New SoftmaxRegression classifier ( classifier.SoftmaxRegression ) New TfMultiLayerPerceptron classifier using Tensorflow ( tf_classifier.TfMultiLayerPerceptron ) New StackingRegressor ( regressor.StackingRegressor ) New StackingClassifier ( classifier.StackingClassifier ) New function for one-hot encoding of class labels ( preprocessing.one_hot ) Added GridSearch support to the SequentialFeatureSelector ( feature_selection.SequentialFeatureSelector ) evaluate.plot_decision_regions improvements: Function now handles y class labels correctly if the array is of type float Correct handling of input arguments markers and colors Accept an existing Axes via the ax argument New print_progress parameter for all generalized models and multi-layer neural networks for printing time elapsed, ETA, and the current cost of the current epoch Minibatch learning for classifier.LogisticRegression , classifier.Adaline , and regressor.LinearRegression plus streamlined API New Principal Component Analysis class via mlxtend.feature_extraction.PrincipalComponentAnalysis New RBF Kernel Principal Component Analysis class via mlxtend.feature_extraction.RBFKernelPCA New Linear Discriminant Analysis class via mlxtend.feature_extraction.LinearDiscriminantAnalysis","title":"New Features"},{"location":"CHANGELOG/#changes_14","text":"The column parameter in mlxtend.preprocessing.standardize now defaults to None to standardize all columns more conveniently","title":"Changes"},{"location":"CHANGELOG/#version-030-2016-01-31","text":"","title":"Version 0.3.0 (2016-01-31)"},{"location":"CHANGELOG/#downloads_14","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_15","text":"Added a progress bar tracker to classifier.NeuralNetMLP Added a function to score predicted vs. target class labels evaluate.scoring Added confusion matrix functions to create ( evaluate.confusion_matrix ) and plot ( evaluate.plot_confusion_matrix ) confusion matrices New style parameter and improved axis scaling in mlxtend.evaluate.plot_learning_curves Added loadlocal_mnist to mlxtend.data for streaming MNIST from local byte files into NumPy arrays New NeuralNetMLP parameters: random_weights , shuffle_init , shuffle_epoch New SFS features such as the generation of pandas DataFrame results tables and plotting functions (with confidence intervals, standard deviation, and standard error bars) Added support for regression estimators in SFS Added Boston housing dataset New shuffle parameter for classifier.NeuralNetMLP","title":"New Features"},{"location":"CHANGELOG/#changes_15","text":"The mlxtend.preprocessing.standardize function now optionally returns the parameters, which are estimated from the array, for re-use.
{"location":"CHANGELOG/#version-029-2015-07-14","text":"","title":"Version 0.2.9 (2015-07-14)"},{"location":"CHANGELOG/#downloads_15","text":"Source code (zip) Source code (tar.gz)","title":"Downloads"},{"location":"CHANGELOG/#new-features_16","text":"Sequential Feature Selection algorithms: SFS, SFFS, SBS, and SFBS","title":"New Features"},{"location":"CHANGELOG/#changes_16","text":"Changed the regularization & lambda parameters in LogisticRegression to the single parameter l2_lambda","title":"Changes"},{"location":"CHANGELOG/#version-028-2015-06-27","text":"API changes: mlxtend.sklearn.EnsembleClassifier -> mlxtend.classifier.EnsembleClassifier mlxtend.sklearn.ColumnSelector -> mlxtend.feature_selection.ColumnSelector mlxtend.sklearn.DenseTransformer -> mlxtend.preprocessing.DenseTransformer mlxtend.pandas.standardizing -> mlxtend.preprocessing.standardizing mlxtend.pandas.minmax_scaling -> mlxtend.preprocessing.minmax_scaling mlxtend.matplotlib -> mlxtend.plotting Added a momentum learning parameter (alpha coefficient) to mlxtend.classifier.NeuralNetMLP . Added an adaptive learning rate (decrease constant) to mlxtend.classifier.NeuralNetMLP . mlxtend.pandas.minmax_scaling became mlxtend.preprocessing.minmax_scaling and also supports NumPy arrays now mlxtend.pandas.standardizing became mlxtend.preprocessing.standardizing and now supports both NumPy arrays and pandas DataFrames; also, there is now a ddof parameter to set the degrees of freedom when calculating the standard deviation","title":"Version 0.2.8 (2015-06-27)"},{"location":"CHANGELOG/#version-027-2015-06-20","text":"Added a multilayer perceptron (feedforward artificial neural network) classifier as mlxtend.classifier.NeuralNetMLP . Added 5000 labeled training samples from the MNIST handwritten digits dataset to mlxtend.data","title":"Version 0.2.7 (2015-06-20)"},{"location":"CHANGELOG/#version-026-2015-05-08","text":"Added ordinary least squares regression using different solvers (gradient descent, stochastic gradient descent, and the closed-form solution (normal equation)) Added an option for random weight initialization to the logistic regression classifier and updated the l2 regularization Added the wine dataset to mlxtend.data Added an invert_axes parameter to mlxtend.matplotlib.enrichment_plot to optionally plot the \"Count\" on the x-axis New verbose parameter for mlxtend.sklearn.EnsembleClassifier by Alejandro C.
Bahnsen Added mlxtend.pandas.standardizing to standardize columns in a pandas DataFrame Added the parameters linestyles and markers to mlxtend.matplotlib.enrichment_plot mlxtend.regression.lin_regplot automatically adds np.newaxis and works with Python lists Added tokenizers: mlxtend.text.extract_emoticons and mlxtend.text.extract_words_and_emoticons","title":"Version 0.2.6 (2015-05-08)"},{"location":"CHANGELOG/#version-025-2015-04-17","text":"Added Sequential Backward Selection (mlxtend.sklearn.SBS) Added the X_highlight parameter to mlxtend.evaluate.plot_decision_regions for highlighting test data points. Added mlxtend.regression.lin_regplot to plot the fitted line from linear regression. Added mlxtend.matplotlib.stacked_barplot to conveniently produce stacked barplots using pandas DataFrame s. Added mlxtend.matplotlib.enrichment_plot","title":"Version 0.2.5 (2015-04-17)"},{"location":"CHANGELOG/#version-024-2015-03-15","text":"Added scoring to mlxtend.evaluate.learning_curves (by user pfsq) Fixed a setup.py bug caused by the missing README.html file Added matplotlib.category_scatter for pandas DataFrames and NumPy arrays","title":"Version 0.2.4 (2015-03-15)"},{"location":"CHANGELOG/#version-023-2015-03-11","text":"Added logistic regression The gradient descent and stochastic gradient descent perceptron was changed to Adaline (Adaptive Linear Neuron) Perceptron and Adaline for {0, 1} classes Added the mlxtend.preprocessing.shuffle_arrays_unison function to shuffle one or more NumPy arrays. Added shuffle and random seed parameters to the stochastic gradient descent classifier. Added an rstrip parameter to mlxtend.file_io.find_filegroups to allow trimming of base names. Added an ignore_substring parameter to mlxtend.file_io.find_filegroups and find_files . Replaced .rstrip in mlxtend.file_io.find_filegroups with a more robust regex. GridSearch support for mlxtend.sklearn.EnsembleClassifier","title":"Version 0.2.3 (2015-03-11)"},{"location":"CHANGELOG/#version-022-2015-03-01","text":"Improved the robustness of EnsembleClassifier. Extended plot_decision_regions() functionality for plotting 1D decision boundaries. The function matplotlib.plot_decision_regions was reorganized to evaluate.plot_decision_regions . evaluate.plot_learning_curves() function added. Added Rosenblatt, gradient descent, and stochastic gradient descent perceptrons.","title":"Version 0.2.2 (2015-03-01)"},{"location":"CHANGELOG/#version-021-2015-01-20","text":"Added mlxtend.pandas.minmax_scaling - a function to rescale pandas DataFrame columns. Slight update to the EnsembleClassifier interface (additional voting parameter) Fixed EnsembleClassifier to return the correct class labels if the class labels are not integers from 0 to n. Added a new matplotlib function to plot the decision regions of classifiers.","title":"Version 0.2.1 (2015-01-20)"},{"location":"CHANGELOG/#version-020-2015-01-13","text":"Improved mlxtend.text.generalize_duplcheck to remove duplicates and prevent an endless looping issue. Added a recursive search parameter to mlxtend.file_io.find_files. Added a check_ext parameter to mlxtend.file_io.find_files to search based on file extensions. Default parameter to ignore invisible files for mlxtend.file_io.find. Added transform and fit_transform to the EnsembleClassifier . Added the mlxtend.file_io.find_filegroups function.","title":"Version 0.2.0 (2015-01-13)"},
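Since the v0.2.0 and v0.2.3 entries above both touch find_filegroups, a brief sketch may help; the directory names below are hypothetical, and the rstrip / ignore_substring arguments are the ones introduced in v0.2.3:

```python
from mlxtend.file_io import find_filegroups

# Group files that share a base name across two (hypothetical) directories;
# trim a suffix from the base names and skip files whose names contain 'backup'.
groups = find_filegroups(paths=['input_dir_1', 'input_dir_2'],
                         extensions=['.csv'],
                         rstrip='_raw',
                         ignore_substring='backup')
print(groups)  # dict: {base name: [matching file paths, ...]}
```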
{"location":"CHANGELOG/#version-019-2015-01-10","text":"Implemented the scikit-learn EnsembleClassifier (majority voting rule) class.","title":"Version 0.1.9 (2015-01-10)"},{"location":"CHANGELOG/#version-018-2015-01-07","text":"Improvements to mlxtend.text.generalize_names to handle certain Dutch last name prefixes (van, van der, de, etc.). Added the mlxtend.text.generalize_name_duplcheck function to apply the mlxtend.text.generalize_names function to a pandas DataFrame without creating duplicates.","title":"Version 0.1.8 (2015-01-07)"},{"location":"CHANGELOG/#version-017-2015-01-07","text":"Added text utilities with a name generalization function. Added file_io utilities.","title":"Version 0.1.7 (2015-01-07)"},{"location":"CHANGELOG/#version-016-2015-01-04","text":"Added combinations and permutations estimators.","title":"Version 0.1.6 (2015-01-04)"},{"location":"CHANGELOG/#version-015-2014-12-11","text":"Added DenseTransformer for pipelines and grid search.","title":"Version 0.1.5 (2014-12-11)"},{"location":"CHANGELOG/#version-014-2014-08-20","text":"The mean_centering function is now a class that creates MeanCenterer objects, which can be used to fit data via the fit method and to center data at the column means via the transform and fit_transform methods.","title":"Version 0.1.4 (2014-08-20)"},{"location":"CHANGELOG/#version-013-2014-08-19","text":"Added the preprocessing module and the mean_centering function.","title":"Version 0.1.3 (2014-08-19)"},{"location":"CHANGELOG/#version-012-2014-08-19","text":"Added matplotlib utilities and the remove_borders function.","title":"Version 0.1.2 (2014-08-19)"},{"location":"CHANGELOG/#version-011-2014-08-13","text":"Simplified the code for ColumnSelector.","title":"Version 0.1.1 (2014-08-13)"},{"location":"CONTRIBUTING/","text":"How to Contribute I would be very happy about any kind of contribution that helps to improve and extend the functionality of mlxtend. Quick Contributor Checklist This is a quick checklist about the different steps of a typical contribution to mlxtend (and other open source projects). Consider copying this list to a local text file (or the issue tracker) and checking off items as you go. [ ] Open a new \"issue\" on GitHub to discuss the new feature / bug fix [ ] Fork the mlxtend repository from GitHub (if not already done earlier) [ ] Create and check out a new topic branch (please don't make modifications in the master branch) [ ] Implement the new feature or apply the bug-fix [ ] Add appropriate unit test functions in mlxtend/*/tests [ ] Run nosetests ./mlxtend -sv and make sure that all unit tests pass [ ] Check/improve the test coverage by running nosetests ./mlxtend --with-coverage [ ] Check for style issues by running flake8 ./mlxtend (you may want to run nosetests again after you made modifications to the code) [ ] Add a note about the modification/contribution to the ./docs/sources/changelog.md file [ ] Modify the documentation in the appropriate location under mlxtend/docs/sources/ [ ] Push the topic branch to the server and create a pull request [ ] Check that the Travis-CI build passed at https://travis-ci.org/rasbt/mlxtend [ ] Check/improve the unit test coverage at https://coveralls.io/github/rasbt/mlxtend [ ] Check/improve the code health at https://landscape.io/github/rasbt/mlxtend Tips for Contributors Getting Started - Creating a New Issue and Forking the Repository If you don't have a GitHub account yet, please create one to contribute to this project.
Please submit a ticket for your issue to discuss the fix or new feature before too much time and effort is spent on the implementation. Fork the mlxtend repository from the GitHub web interface. Clone the mlxtend repository to your local machine by executing git clone https://github.com//mlxtend.git Syncing an Existing Fork If you already forked mlxtend earlier, you can bring your \"Fork\" up to date with the master branch as follows: 1. Configuring a remote that points to the upstream repository on GitHub List the currently configured remote repository of your fork by executing $ git remote -v If you see something like origin https://github.com//mlxtend.git (fetch) origin https://github.com//mlxtend.git (push) you need to specify a new remote upstream repository via $ git remote add upstream https://github.com/rasbt/mlxtend.git Now, verify the new upstream repository you've specified for your fork by executing $ git remote -v You should see the following output if everything is configured correctly: origin https://github.com//mlxtend.git (fetch) origin https://github.com//mlxtend.git (push) upstream https://github.com/rasbt/mlxtend.git (fetch) upstream https://github.com/rasbt/mlxtend.git (push) 2. Syncing your Fork First, fetch the updates of the original project's master branch by executing: $ git fetch upstream You should see the following output remote: Counting objects: xx, done. remote: Compressing objects: 100% (xx/xx), done. remote: Total xx (delta xx), reused xx (delta x) Unpacking objects: 100% (xx/xx), done. From https://github.com/rasbt/mlxtend * [new branch] master -> upstream/master This means that the commits to the rasbt/mlxtend master branch are now stored in the local branch upstream/master . If you are not already on your local project's master branch, execute $ git checkout master Finally, merge the changes in upstream/master into your local master branch by executing $ git merge upstream/master which will give you an output that looks similar to Updating xxx...xxx Fast-forward SOME FILE1 | 12 +++++++ SOME FILE2 | 10 +++++++ 2 files changed, 22 insertions(+), The Main Workflow - Making Changes in a New Topic Branch Listed below are the 9 typical steps of a contribution. 1. Discussing the Feature or Modification Before you start coding, please discuss the new feature, bugfix, or other modification to the project on the project's issue tracker . Before you open a \"new issue,\" please do a quick search to see if a similar issue has been submitted already. 2. Creating a new feature branch Please avoid working directly on the master branch and create a new feature branch instead: $ git branch Switch to the new feature branch by executing $ git checkout 3. Developing the new feature / bug fix Now it's time to modify existing code or to contribute new code to the project. 4. Testing your code Add the respective unit tests and check if they pass: $ nosetests -sv Use the --with-coverage flag to ensure that all code is being covered in the unit tests: $ nosetests --with-coverage 5. Documenting changes Please add an entry to the mlxtend/docs/sources/changelog.md file. If it is a new feature, it would also be nice if you could update the documentation in the appropriate location in mlxtend/docs/sources . 6. Committing changes When you are ready to commit the changes, please provide a meaningful commit message: $ git add # or `git add .` $ git commit -m '' 7. Optional: squashing commits If you made multiple smaller commits, it would be nice if you could group them into a larger, summarizing commit.
Note: Due to the improved GitHub UI, this is no longer necessary/encouraged. First, list your recent commits via $ git log which will list the commits from newest to oldest in the following format by default: commit 046e3af8a9127df8eac879454f029937c8a31c41 Author: rasbt Date: Tue Nov 24 03:46:37 2015 -0500 fixed setup.py commit c3c00f6ba0e8f48bbe1c9081b8ae3817e57ecc5c Author: rasbt Date: Tue Nov 24 03:04:39 2015 -0500 documented feature x commit d87934fe8726c46f0b166d6290a3bf38915d6e75 Author: rasbt Date: Tue Nov 24 02:44:45 2015 -0500 added support for feature x Assuming that it would make sense to group these 3 commits into one, we can execute $ git rebase -i HEAD~3 which will bring up our default git editor with the following contents: pick d87934f added support for feature x pick c3c00f6 documented feature x pick 046e3af fixed setup.py Since c3c00f6 and 046e3af are related to the original commit of feature x , let's keep d87934f and squash the 2 following commits into this initial one by changing the lines to pick d87934f added support for feature x squash c3c00f6 documented feature x squash 046e3af fixed setup.py Now, save the changes in your editor. Quitting the editor will then apply the rebase changes, and the editor will open a second time, prompting you to enter a new commit message. In this case, we could enter support for feature x to summarize the contributions. 8. Uploading changes Push your changes in a topic branch to the git server by executing: $ git push origin 9. Submitting a pull request Go to your GitHub repository online, select the new feature branch, and submit a new pull request: Notes for Developers Building the documentation The documentation is built via MkDocs ; to ensure that the documentation is rendered correctly, you can view the documentation locally by executing mkdocs serve from the mlxtend/docs directory. For example, ~/github/mlxtend/docs$ mkdocs serve 1. Building the API documentation To build the API documentation, navigate to mlxtend/docs and execute the make_api.py file from this directory via ~/github/mlxtend/docs$ python make_api.py This should place the API documentation into the two directories: mlxtend/docs/sources/api_modules mlxtend/docs/sources/api_subpackages 2. Editing the User Guide The documents containing code examples for the \"User Guide\" are generated from IPython Notebook files. In order to convert an IPython notebook file to markdown after editing, please follow these steps: Modify or edit the existing notebook. Execute all cells in the current notebook and make sure that no errors occur. Convert the notebook to markdown using the ipynb2markdown.py converter ~/github/mlxtend/docs$ python ipynb2markdown.py --ipynb_path ./sources/user_guide/subpackage/notebookname.ipynb Note If you are adding a new document, please also include it in the pages section in the mlxtend/docs/mkdocs.yml file. 3. Building static HTML files of the documentation First, please check the documentation via localhost (http://127.0.0.1:8000/): ~/github/mlxtend/docs$ mkdocs serve Next, build the static HTML files of the mlxtend documentation via ~/github/mlxtend/docs$ mkdocs build --clean To deploy the documentation, execute ~/github/mlxtend/docs$ mkdocs gh-deploy --clean 4. Generate a PDF of the documentation To generate a PDF version of the documentation, simply cd into the mlxtend/docs directory and execute: python md2pdf.py Uploading a new version to PyPI 1.
Creating a new testing environment Assuming we are using conda , create a new Python environment via $ conda create -n 'mlxtend-testing' python=3 numpy scipy pandas Next, activate the environment by executing $ source activate mlxtend-testing 2. Installing the package from local files Test the installation by executing $ python setup.py install --record files.txt The --record files.txt flag will create a files.txt file listing the locations where these files will be installed. Try to import the package to see if it works, for example, by executing $ python -c 'import mlxtend; print(mlxtend.__file__)' If everything seems to be fine, remove the installation via $ cat files.txt | xargs rm -rf ; rm files.txt Next, test if pip is able to install the package. First, navigate to a different directory, and from there, install the package: $ pip install mlxtend and uninstall it again $ pip uninstall mlxtend 3. Deploying the package Consider deploying the package to the PyPI test server first. The setup instructions can be found here . $ python setup.py sdist bdist_wheel upload -r https://testpypi.python.org/pypi Test if it can be installed from there by executing $ pip install -i https://testpypi.python.org/pypi mlxtend and uninstall it $ pip uninstall mlxtend After this dry run has succeeded, repeat this process using the \"real\" PyPI: $ python setup.py sdist bdist_wheel upload 4. Removing the virtual environment Finally, to clean up our local drive, remove the virtual testing environment via $ conda remove --name 'mlxtend-testing' --all 5. Updating the conda-forge recipe Once a new version of mlxtend has been uploaded to PyPI, update the conda-forge build recipe at https://github.com/conda-forge/mlxtend-feedstock by changing the version number in the recipe/meta.yaml file appropriately.","title":"How To Contribute"},{"location":"CONTRIBUTING/#how-to-contribute","text":"I would be very happy about any kind of contribution that helps to improve and extend the functionality of mlxtend.","title":"How to Contribute"},{"location":"CONTRIBUTING/#quick-contributor-checklist","text":"This is a quick checklist about the different steps of a typical contribution to mlxtend (and other open source projects). Consider copying this list to a local text file (or the issue tracker) and checking off items as you go.
[ ] Open a new \"issue\" on GitHub to discuss the new feature / bug fix [ ] Fork the mlxtend repository from GitHub (if not already done earlier) [ ] Create and check out a new topic branch (please don't make modifications in the master branch) [ ] Implement the new feature or apply the bug-fix [ ] Add appropriate unit test functions in mlxtend/*/tests [ ] Run nosetests ./mlxtend -sv and make sure that all unit tests pass [ ] Check/improve the test coverage by running nosetests ./mlxtend --with-coverage [ ] Check for style issues by running flake8 ./mlxtend (you may want to run nosetests again after you made modifications to the code) [ ] Add a note about the modification/contribution to the ./docs/sources/changelog.md file [ ] Modify the documentation in the appropriate location under mlxtend/docs/sources/ [ ] Push the topic branch to the server and create a pull request [ ] Check that the Travis-CI build passed at https://travis-ci.org/rasbt/mlxtend [ ] Check/improve the unit test coverage at https://coveralls.io/github/rasbt/mlxtend [ ] Check/improve the code health at https://landscape.io/github/rasbt/mlxtend","title":"Quick Contributor Checklist"},{"location":"CONTRIBUTING/#tips-for-contributors","text":"","title":"Tips for Contributors"},{"location":"CONTRIBUTING/#getting-started-creating-a-new-issue-and-forking-the-repository","text":"If you don't have a GitHub account yet, please create one to contribute to this project. Please submit a ticket for your issue to discuss the fix or new feature before too much time and effort is spent on the implementation. Fork the mlxtend repository from the GitHub web interface. Clone the mlxtend repository to your local machine by executing git clone https://github.com//mlxtend.git","title":"Getting Started - Creating a New Issue and Forking the Repository"},{"location":"CONTRIBUTING/#syncing-an-existing-fork","text":"If you already forked mlxtend earlier, you can bring your \"Fork\" up to date with the master branch as follows:","title":"Syncing an Existing Fork"},{"location":"CONTRIBUTING/#1-configuring-a-remote-that-points-to-the-upstream-repository-on-github","text":"List the currently configured remote repository of your fork by executing $ git remote -v If you see something like origin https://github.com//mlxtend.git (fetch) origin https://github.com//mlxtend.git (push) you need to specify a new remote upstream repository via $ git remote add upstream https://github.com/rasbt/mlxtend.git Now, verify the new upstream repository you've specified for your fork by executing $ git remote -v You should see the following output if everything is configured correctly: origin https://github.com//mlxtend.git (fetch) origin https://github.com//mlxtend.git (push) upstream https://github.com/rasbt/mlxtend.git (fetch) upstream https://github.com/rasbt/mlxtend.git (push)","title":"1. Configuring a remote that points to the upstream repository on GitHub"},{"location":"CONTRIBUTING/#2-syncing-your-fork","text":"First, fetch the updates of the original project's master branch by executing: $ git fetch upstream You should see the following output remote: Counting objects: xx, done. remote: Compressing objects: 100% (xx/xx), done. remote: Total xx (delta xx), reused xx (delta x) Unpacking objects: 100% (xx/xx), done. From https://github.com/rasbt/mlxtend * [new branch] master -> upstream/master This means that the commits to the rasbt/mlxtend master branch are now stored in the local branch upstream/master .
If you are not already on your local project's master branch, execute $ git checkout master Finally, merge the changes in upstream/master into your local master branch by executing $ git merge upstream/master which will give you an output that looks similar to Updating xxx...xxx Fast-forward SOME FILE1 | 12 +++++++ SOME FILE2 | 10 +++++++ 2 files changed, 22 insertions(+),","title":"2. Syncing your Fork"},{"location":"CONTRIBUTING/#the-main-workflow-making-changes-in-a-new-topic-branch","text":"Listed below are the 9 typical steps of a contribution.","title":"The Main Workflow - Making Changes in a New Topic Branch"},{"location":"CONTRIBUTING/#1-discussing-the-feature-or-modification","text":"Before you start coding, please discuss the new feature, bugfix, or other modification to the project on the project's issue tracker . Before you open a \"new issue,\" please do a quick search to see if a similar issue has been submitted already.","title":"1. Discussing the Feature or Modification"},{"location":"CONTRIBUTING/#2-creating-a-new-feature-branch","text":"Please avoid working directly on the master branch and create a new feature branch instead: $ git branch Switch to the new feature branch by executing $ git checkout ","title":"2. Creating a new feature branch"},{"location":"CONTRIBUTING/#3-developing-the-new-feature-bug-fix","text":"Now it's time to modify existing code or to contribute new code to the project.","title":"3. Developing the new feature / bug fix"},{"location":"CONTRIBUTING/#4-testing-your-code","text":"Add the respective unit tests and check if they pass: $ nosetests -sv Use the --with-coverage flag to ensure that all code is being covered in the unit tests: $ nosetests --with-coverage","title":"4. Testing your code"},{"location":"CONTRIBUTING/#5-documenting-changes","text":"Please add an entry to the mlxtend/docs/sources/changelog.md file. If it is a new feature, it would also be nice if you could update the documentation in the appropriate location in mlxtend/docs/sources .","title":"5. Documenting changes"},{"location":"CONTRIBUTING/#6-committing-changes","text":"When you are ready to commit the changes, please provide a meaningful commit message: $ git add # or `git add .` $ git commit -m ''","title":"6. Committing changes"},{"location":"CONTRIBUTING/#7-optional-squashing-commits","text":"If you made multiple smaller commits, it would be nice if you could group them into a larger, summarizing commit. Note: Due to the improved GitHub UI, this is no longer necessary/encouraged. First, list your recent commits via
$ git log which will list the commits from newest to oldest in the following format by default: commit 046e3af8a9127df8eac879454f029937c8a31c41 Author: rasbt Date: Tue Nov 24 03:46:37 2015 -0500 fixed setup.py commit c3c00f6ba0e8f48bbe1c9081b8ae3817e57ecc5c Author: rasbt Date: Tue Nov 24 03:04:39 2015 -0500 documented feature x commit d87934fe8726c46f0b166d6290a3bf38915d6e75 Author: rasbt Date: Tue Nov 24 02:44:45 2015 -0500 added support for feature x Assuming that it would make sense to group these 3 commits into one, we can execute $ git rebase -i HEAD~3 which will bring up our default git editor with the following contents: pick d87934f added support for feature x pick c3c00f6 documented feature x pick 046e3af fixed setup.py Since c3c00f6 and 046e3af are related to the original commit of feature x , let's keep d87934f and squash the 2 following commits into this initial one by changing the lines to pick d87934f added support for feature x squash c3c00f6 documented feature x squash 046e3af fixed setup.py Now, save the changes in your editor. Quitting the editor will then apply the rebase changes, and the editor will open a second time, prompting you to enter a new commit message. In this case, we could enter support for feature x to summarize the contributions.","title":"7. Optional: squashing commits"},{"location":"CONTRIBUTING/#8-uploading-changes","text":"Push your changes in a topic branch to the git server by executing: $ git push origin ","title":"8. Uploading changes"},{"location":"CONTRIBUTING/#9-submitting-a-pull-request","text":"Go to your GitHub repository online, select the new feature branch, and submit a new pull request:","title":"9. Submitting a pull request"},{"location":"CONTRIBUTING/#notes-for-developers","text":"","title":"Notes for Developers"},{"location":"CONTRIBUTING/#building-the-documentation","text":"The documentation is built via MkDocs ; to ensure that the documentation is rendered correctly, you can view the documentation locally by executing mkdocs serve from the mlxtend/docs directory. For example, ~/github/mlxtend/docs$ mkdocs serve","title":"Building the documentation"},{"location":"CONTRIBUTING/#1-building-the-api-documentation","text":"To build the API documentation, navigate to mlxtend/docs and execute the make_api.py file from this directory via ~/github/mlxtend/docs$ python make_api.py This should place the API documentation into the two directories: mlxtend/docs/sources/api_modules mlxtend/docs/sources/api_subpackages","title":"1. Building the API documentation"},{"location":"CONTRIBUTING/#2-editing-the-user-guide","text":"The documents containing code examples for the \"User Guide\" are generated from IPython Notebook files. In order to convert an IPython notebook file to markdown after editing, please follow these steps: Modify or edit the existing notebook. Execute all cells in the current notebook and make sure that no errors occur. Convert the notebook to markdown using the ipynb2markdown.py converter ~/github/mlxtend/docs$ python ipynb2markdown.py --ipynb_path ./sources/user_guide/subpackage/notebookname.ipynb Note If you are adding a new document, please also include it in the pages section in the mlxtend/docs/mkdocs.yml file.","title":"2.
Editing the User Guide"},{"location":"CONTRIBUTING/#3-building-static-html-files-of-the-documentation","text":"First, please check the documentation via localhost (http://127.0.0.1:8000/): ~/github/mlxtend/docs$ mkdocs serve Next, build the static HTML files of the mlxtend documentation via ~/github/mlxtend/docs$ mkdocs build --clean To deploy the documentation, execute ~/github/mlxtend/docs$ mkdocs gh-deploy --clean","title":"3. Building static HTML files of the documentation"},{"location":"CONTRIBUTING/#4-generate-a-pdf-of-the-documentation","text":"To generate a PDF version of the documentation, simply cd into the mlxtend/docs directory and execute: python md2pdf.py","title":"4. Generate a PDF of the documentation"},{"location":"CONTRIBUTING/#uploading-a-new-version-to-pypi","text":"","title":"Uploading a new version to PyPI"},{"location":"CONTRIBUTING/#1-creating-a-new-testing-environment","text":"Assuming we are using conda , create a new Python environment via $ conda create -n 'mlxtend-testing' python=3 numpy scipy pandas Next, activate the environment by executing $ source activate mlxtend-testing","title":"1. Creating a new testing environment"},{"location":"CONTRIBUTING/#2-installing-the-package-from-local-files","text":"Test the installation by executing $ python setup.py install --record files.txt The --record files.txt flag will create a files.txt file listing the locations where these files will be installed. Try to import the package to see if it works, for example, by executing $ python -c 'import mlxtend; print(mlxtend.__file__)' If everything seems to be fine, remove the installation via $ cat files.txt | xargs rm -rf ; rm files.txt Next, test if pip is able to install the package. First, navigate to a different directory, and from there, install the package: $ pip install mlxtend and uninstall it again $ pip uninstall mlxtend","title":"2. Installing the package from local files"},{"location":"CONTRIBUTING/#3-deploying-the-package","text":"Consider deploying the package to the PyPI test server first. The setup instructions can be found here . $ python setup.py sdist bdist_wheel upload -r https://testpypi.python.org/pypi Test if it can be installed from there by executing $ pip install -i https://testpypi.python.org/pypi mlxtend and uninstall it $ pip uninstall mlxtend After this dry run has succeeded, repeat this process using the \"real\" PyPI: $ python setup.py sdist bdist_wheel upload","title":"3. Deploying the package"},{"location":"CONTRIBUTING/#4-removing-the-virtual-environment","text":"Finally, to clean up our local drive, remove the virtual testing environment via $ conda remove --name 'mlxtend-testing' --all","title":"4. Removing the virtual environment"},{"location":"CONTRIBUTING/#5-updating-the-conda-forge-recipe","text":"Once a new version of mlxtend has been uploaded to PyPI, update the conda-forge build recipe at https://github.com/conda-forge/mlxtend-feedstock by changing the version number in the recipe/meta.yaml file appropriately.","title":"5.
Updating the conda-forge recipe"},{"location":"USER_GUIDE_INDEX/","text":"User Guide Index classifier Adaline EnsembleVoteClassifier LogisticRegression MultiLayerPerceptron Perceptron SoftmaxRegression StackingClassifier StackingCVClassifier cluster Kmeans data autompg_data boston_housing_data iris_data loadlocal_mnist make_multiplexer_dataset mnist_data three_blobs_data wine_data evaluate bootstrap bootstrap_point632_score BootstrapOutOfBag cochrans_q confusion_matrix combined_ftest_5x2cv feature_importance_permutation ftest lift_score mcnemar_table mcnemar_tables mcnemar paired_ttest_5x2cv paired_ttest_kfold_cv paired_ttest_resampled permutation_test PredefinedHoldoutSplit proportion_difference RandomHoldoutSplit scoring feature_extraction LinearDiscriminantAnalysis PrincipalComponentAnalysis RBFKernelPCA feature_selection ColumnSelector ExhaustiveFeatureSelector SequentialFeatureSelector file_io find_filegroups find_files frequent_patterns apriori association_rules general concepts activation-functions gradient-optimization linear-gradient-derivative regularization-linear image extract_face_landmarks math num_combinations num_permutations plotting category_scatter checkerboard_plot ecdf enrichment_plot plot_confusion_matrix plot_decision_regions plot_learning_curves plot_linear_regression plot_sequential_feature_selection scatterplotmatrix stacked_barplot preprocessing CopyTransformer DenseTransformer MeanCenterer minmax_scaling one-hot_encoding shuffle_arrays_unison standardize TransactionEncoder regressor LinearRegression StackingCVRegressor StackingRegressor text generalize_names generalize_names_duplcheck tokenizer utils Counter","title":"User Guide Index"},{"location":"USER_GUIDE_INDEX/#user-guide-index","text":"","title":"User Guide Index"},{"location":"USER_GUIDE_INDEX/#classifier","text":"Adaline EnsembleVoteClassifier LogisticRegression MultiLayerPerceptron Perceptron SoftmaxRegression StackingClassifier StackingCVClassifier","title":"classifier"},{"location":"USER_GUIDE_INDEX/#cluster","text":"Kmeans","title":"cluster"},{"location":"USER_GUIDE_INDEX/#data","text":"autompg_data boston_housing_data iris_data loadlocal_mnist make_multiplexer_dataset mnist_data three_blobs_data wine_data","title":"data"},{"location":"USER_GUIDE_INDEX/#evaluate","text":"bootstrap bootstrap_point632_score BootstrapOutOfBag cochrans_q confusion_matrix combined_ftest_5x2cv feature_importance_permutation ftest lift_score mcnemar_table mcnemar_tables mcnemar paired_ttest_5x2cv paired_ttest_kfold_cv paired_ttest_resampled permutation_test PredefinedHoldoutSplit proportion_difference RandomHoldoutSplit scoring","title":"evaluate"},{"location":"USER_GUIDE_INDEX/#feature_extraction","text":"LinearDiscriminantAnalysis PrincipalComponentAnalysis RBFKernelPCA","title":"feature_extraction"},{"location":"USER_GUIDE_INDEX/#feature_selection","text":"ColumnSelector ExhaustiveFeatureSelector SequentialFeatureSelector","title":"feature_selection"},{"location":"USER_GUIDE_INDEX/#file_io","text":"find_filegroups find_files","title":"file_io"},{"location":"USER_GUIDE_INDEX/#frequent_patterns","text":"apriori association_rules","title":"frequent_patterns"},{"location":"USER_GUIDE_INDEX/#general-concepts","text":"activation-functions gradient-optimization linear-gradient-derivative regularization-linear","title":"general concepts"},{"location":"USER_GUIDE_INDEX/#image","text":"extract_face_landmarks","title":"image"},{"location":"USER_GUIDE_INDEX/#math","text":"num_combinations 
num_permutations","title":"math"},{"location":"USER_GUIDE_INDEX/#plotting","text":"category_scatter checkerboard_plot ecdf enrichment_plot plot_confusion_matrix plot_decision_regions plot_learning_curves plot_linear_regression plot_sequential_feature_selection scatterplotmatrix stacked_barplot","title":"plotting"},{"location":"USER_GUIDE_INDEX/#preprocessing","text":"CopyTransformer DenseTransformer MeanCenterer minmax_scaling one-hot_encoding shuffle_arrays_unison standardize TransactionEncoder","title":"preprocessing"},{"location":"USER_GUIDE_INDEX/#regressor","text":"LinearRegression StackingCVRegressor StackingRegressor","title":"regressor"},{"location":"USER_GUIDE_INDEX/#text","text":"generalize_names generalize_names_duplcheck tokenizer","title":"text"},{"location":"USER_GUIDE_INDEX/#utils","text":"Counter","title":"utils"},{"location":"cite/","text":"Citing mlxtend If you use mlxtend as part of your workflow in a scientific publication, please consider citing the mlxtend repository with the following DOI: Raschka, Sebastian (2018) MLxtend: Providing machine learning and data science utilities and extensions to Python's scientific computing stack . J Open Source Softw 3(24). @article{raschkas_2018_mlxtend, author = {Sebastian Raschka}, title = {MLxtend: Providing machine learning and data science utilities and extensions to Python\u2019s scientific computing stack}, journal = {The Journal of Open Source Software}, volume = {3}, number = {24}, month = apr, year = 2018, publisher = {The Open Journal}, doi = {10.21105/joss.00638}, url = {http://joss.theoj.org/papers/10.21105/joss.00638} }","title":"Citing Mlxtend"},{"location":"cite/#citing-mlxtend","text":"If you use mlxtend as part of your workflow in a scientific publication, please consider citing the mlxtend repository with the following DOI: Raschka, Sebastian (2018) MLxtend: Providing machine learning and data science utilities and extensions to Python's scientific computing stack . J Open Source Softw 3(24). @article{raschkas_2018_mlxtend, author = {Sebastian Raschka}, title = {MLxtend: Providing machine learning and data science utilities and extensions to Python\u2019s scientific computing stack}, journal = {The Journal of Open Source Software}, volume = {3}, number = {24}, month = apr, year = 2018, publisher = {The Open Journal}, doi = {10.21105/joss.00638}, url = {http://joss.theoj.org/papers/10.21105/joss.00638} }","title":"Citing mlxtend"},{"location":"contributors/","text":"Contributors For the current list of contributors to mlxtend, please see the GitHub contributor page at https://github.com/rasbt/mlxtend/graphs/contributors .","title":"Contributors"},{"location":"contributors/#contributors","text":"For the current list of contributors to mlxtend, please see the GitHub contributor page at https://github.com/rasbt/mlxtend/graphs/contributors .","title":"Contributors"},{"location":"discuss/","text":"Discuss Any questions or comments about mlxtend? Join the mlxtend mailing list on Google Groups!","title":"Discuss"},{"location":"discuss/#discuss","text":"Any questions or comments about mlxtend? 
Join the mlxtend mailing list on Google Groups!","title":"Discuss"},{"location":"installation/","text":"Installing mlxtend PyPI To install mlxtend, just execute pip install mlxtend Alternatively, you can download the package manually from the Python Package Index https://pypi.python.org/pypi/mlxtend , unzip it, navigate into the package, and use the command: python setup.py install Upgrading via pip To upgrade an existing version of mlxtend from PyPI, execute pip install mlxtend --upgrade --no-deps Please note that the dependencies (NumPy and SciPy) will also be upgraded if you omit the --no-deps flag; use the --no-deps (\"no dependencies\") flag if you don't want this. Installing mlxtend from the source distribution In rare cases, users have reported problems on certain systems with the default pip installation command, which installs mlxtend from the binary distribution (\"wheels\") on PyPI. If you should encounter similar problems, you could try to install mlxtend from the source distribution instead via pip install --no-binary :all: mlxtend Also, I would appreciate it if you could report any issues that occur when using pip install mlxtend in the hope that we can fix these in future releases. Conda The mlxtend package is also available through conda forge . To install mlxtend using conda, use the following command: conda install mlxtend --channel conda-forge or simply conda install mlxtend if you added conda-forge to your channels ( conda config --add channels conda-forge ). Dev Version The mlxtend version on PyPI may always be one step behind; you can install the latest development version from the GitHub repository by executing pip install git+git://github.com/rasbt/mlxtend.git Or, you can fork the GitHub repository from https://github.com/rasbt/mlxtend and install mlxtend from your local drive via python setup.py install","title":"Installation"},{"location":"installation/#installing-mlxtend","text":"","title":"Installing mlxtend"},{"location":"installation/#pypi","text":"To install mlxtend, just execute pip install mlxtend Alternatively, you can download the package manually from the Python Package Index https://pypi.python.org/pypi/mlxtend , unzip it, navigate into the package, and use the command: python setup.py install","title":"PyPI"},{"location":"installation/#upgrading-via-pip","text":"To upgrade an existing version of mlxtend from PyPI, execute pip install mlxtend --upgrade --no-deps Please note that the dependencies (NumPy and SciPy) will also be upgraded if you omit the --no-deps flag; use the --no-deps (\"no dependencies\") flag if you don't want this.","title":"Upgrading via pip"},{"location":"installation/#installing-mlxtend-from-the-source-distribution","text":"In rare cases, users have reported problems on certain systems with the default pip installation command, which installs mlxtend from the binary distribution (\"wheels\") on PyPI. If you should encounter similar problems, you could try to install mlxtend from the source distribution instead via pip install --no-binary :all: mlxtend Also, I would appreciate it if you could report any issues that occur when using pip install mlxtend in the hope that we can fix these in future releases.","title":"Installing mlxtend from the source distribution"},{"location":"installation/#conda","text":"The mlxtend package is also available through conda forge .
To install mlxtend using conda, use the following command: conda install mlxtend --channel conda-forge or simply conda install mlxtend if you added conda-forge to your channels ( conda config --add channels conda-forge ).","title":"Conda"},{"location":"installation/#dev-version","text":"The mlxtend version on PyPI may always be one step behind; you can install the latest development version from the GitHub repository by executing pip install git+git://github.com/rasbt/mlxtend.git Or, you can fork the GitHub repository from https://github.com/rasbt/mlxtend and install mlxtend from your local drive via python setup.py install","title":"Dev Version"},{"location":"license/","text":"This project is released under a permissive new BSD open source license and is commercially usable. There is no warranty; not even for merchantability or fitness for a particular purpose. In addition, you may use, copy, modify, and redistribute all artistic creative works (figures and images) included in this distribution under the directory according to the terms and conditions of the Creative Commons Attribution 4.0 International License. (Computer-generated graphics such as the plots produced by matplotlib fall under the BSD license mentioned above). new BSD License New BSD License Copyright (c) 2014-2018, Sebastian Raschka. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of mlxtend nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Creative Commons Attribution 4.0 International License mlxtend documentation figures are licensed under a Creative Commons Attribution 4.0 International License. http://creativecommons.org/licenses/by-sa/4.0/ . You are free to: Share \u2014 copy and redistribute the material in any medium or format Adapt \u2014 remix, transform, and build upon the material for any purpose, even commercially. The licensor cannot revoke these freedoms as long as you follow the license terms. Under the following terms: Attribution \u2014 You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use.
No additional restrictions \u2014 You may not apply legal terms or technological measures that legally restrict others from doing anything the license permits.","title":"License"},{"location":"license/#new-bsd-license","text":"New BSD License Copyright (c) 2014-2018, Sebastian Raschka. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of mlxtend nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.","title":"new BSD License"},{"location":"license/#creative-commons-attribution-40-international-license","text":"mlxtend documentation figures are licensed under a Creative Commons Attribution 4.0 International License. http://creativecommons.org/licenses/by-sa/4.0/ .","title":"Creative Commons Attribution 4.0 International License"},{"location":"license/#you-are-free-to","text":"Share \u2014 copy and redistribute the material in any medium or format Adapt \u2014 remix, transform, and build upon the material for any purpose, even commercially. The licensor cannot revoke these freedoms as long as you follow the license terms.","title":"You are free to:"},{"location":"license/#under-the-following-terms","text":"Attribution \u2014 You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. No additional restrictions \u2014 You may not apply legal terms or technological measures that legally restrict others from doing anything the license permits.","title":"Under the following terms:"},{"location":"api_modules/mlxtend.classifier/Adaline/","text":"Adaline Adaline(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) ADAptive LInear NEuron classifier. Note that this implementation of Adaline expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) solver rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. 
If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Adaline/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause","title":"Adaline"},{"location":"api_modules/mlxtend.classifier/Adaline/#adaline","text":"Adaline(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) ADAptive LInear NEuron classifier. Note that this implementation of Adaline expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) solver rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. 
If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Adaline/","title":"Adaline"},{"location":"api_modules/mlxtend.classifier/Adaline/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_modules/mlxtend.classifier/Adaline/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.classifier/Adaline/#license-bsd-3-clause","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. 
Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.classifier/Adaline/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.classifier/Adaline/#license-bsd-3-clause_1","text":"","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.classifier/EnsembleVoteClassifier/","text":"EnsembleVoteClassifier EnsembleVoteClassifier(clfs, voting='hard', weights=None, verbose=0, refit=True) Soft Voting/Majority Rule classifier for scikit-learn estimators. Parameters clfs : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the EnsembleVoteClassifier will fit clones of those original classifiers that will be stored in the class attribute self.clfs_ if refit=True (default). voting : str, {'hard', 'soft'} (default='hard') If 'hard', uses predicted class labels for majority rule voting. Else if 'soft', predicts the class label based on the argmax of the sums of the predicted probabilities, which is recommended for an ensemble of well-calibrated classifiers. weights : array-like, shape = [n_classifiers], optional (default= None ) Sequence of weights ( float or int ) to weight the occurrences of predicted class labels ( hard voting) or class probabilities before averaging ( soft voting). Uses uniform weights if None . verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the clf being fitted - verbose=2 : Prints info about the parameters of the clf being fitted - verbose>2 : Changes verbose param of the underlying clf to self.verbose - 2 refit : bool (default: True) Refits classifiers in clfs if True; uses references to the clfs otherwise (assumes that the classifiers were already fit). Note: refit=False is incompatible with most scikit-learn wrappers! For instance, if any form of cross-validation is performed, this would require re-fitting the classifiers to the training folds, which would raise a NotFittedError if refit=False. (New in mlxtend v0.6.) Attributes classes_ : array-like, shape = [n_predictions] clf : array-like, shape = [n_predictions] The unmodified input classifiers clf_ : array-like, shape = [n_predictions] Fitted clones of the input classifiers Examples >>> import numpy as np >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.ensemble import RandomForestClassifier >>> from mlxtend.classifier import EnsembleVoteClassifier >>> clf1 = LogisticRegression(random_state=1) >>> clf2 = RandomForestClassifier(random_state=1) >>> clf3 = GaussianNB() >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> eclf1 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='hard', verbose=1) >>> eclf1 = eclf1.fit(X, y) >>> print(eclf1.predict(X)) [1 1 1 2 2 2] >>> eclf2 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft') >>> eclf2 = eclf2.fit(X, y) >>> print(eclf2.predict(X)) [1 1 1 2 2 2] >>> eclf3 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ...
voting='soft', weights=[2,1,1]) >>> eclf3 = eclf3.fit(X, y) >>> print(eclf3.predict(X)) [1 1 1 2 2 2] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/EnsembleVoteClassifier/ Methods fit(X, y, sample_weight=None) Learn weight coefficients from training data for each classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weight to each classifier in the clfs list. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict class labels for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns maj : array-like, shape = [n_samples] Predicted class labels. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns avg : array-like, shape = [n_samples, n_classes] Weighted average probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) with respect to y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Return class labels or probabilities for X for each estimator. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns If voting='soft' : array-like = [n_classifiers, n_samples, n_classes] Class probabilities calculated by each classifier. If voting='hard' : array-like = [n_classifiers, n_samples] Class labels predicted by each classifier.","title":"EnsembleVoteClassifier"},{"location":"api_modules/mlxtend.classifier/EnsembleVoteClassifier/#ensemblevoteclassifier","text":"EnsembleVoteClassifier(clfs, voting='hard', weights=None, verbose=0, refit=True) Soft Voting/Majority Rule classifier for scikit-learn estimators.
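Since get_params exposes nested parameters in the <component>__<parameter> form noted above, the ensemble slots directly into scikit-learn model selection. A hedged sketch: the lowercased class-name keys such as 'logisticregression__C' are assumed from mlxtend's naming convention and may need adjusting:

# Tuning sub-classifiers of an EnsembleVoteClassifier via GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_iris
from mlxtend.classifier import EnsembleVoteClassifier

X, y = load_iris(return_X_y=True)
eclf = EnsembleVoteClassifier(
    clfs=[LogisticRegression(random_state=1),
          RandomForestClassifier(random_state=1),
          GaussianNB()],
    voting='soft')

# Nested names follow the <component>__<parameter> pattern from get_params()
params = {'logisticregression__C': [1.0, 100.0],
          'randomforestclassifier__n_estimators': [20, 200]}

grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(X, y)
print(grid.best_params_)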
Parameters clfs : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the VotingClassifier will fit clones of those original classifiers that will be stored in the class attribute self.clfs_ if refit=True (default). voting : str, {'hard', 'soft'} (default='hard') If 'hard', uses predicted class labels for majority rule voting. Else if 'soft', predicts the class label based on the argmax of the sums of the predicted probabilities, which is recommended for an ensemble of well-calibrated classifiers. weights : array-like, shape = [n_classifiers], optional (default=None) Sequence of weights (float or int) to weight the occurrences of predicted class labels (hard voting) or class probabilities before averaging (soft voting). Uses uniform weights if None. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the clf being fitted - verbose=2 : Prints info about the parameters of the clf being fitted - verbose>2 : Changes verbose param of the underlying clf to self.verbose - 2 refit : bool (default: True) Refits the classifiers in clfs if True; otherwise uses references to the clfs (assumes that the classifiers were already fit). Note: refit=False is incompatible with most scikit-learn wrappers! For instance, if any form of cross-validation is performed, the classifiers would need to be refitted on the training folds, which would raise a NotFittedError if refit=False. (New in mlxtend v0.6.) Attributes classes_ : array-like, shape = [n_predictions] clf : array-like, shape = [n_predictions] The unmodified input classifiers clf_ : array-like, shape = [n_predictions] Fitted clones of the input classifiers Examples >>> import numpy as np >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.ensemble import RandomForestClassifier >>> from mlxtend.classifier import EnsembleVoteClassifier >>> clf1 = LogisticRegression(random_state=1) >>> clf2 = RandomForestClassifier(random_state=1) >>> clf3 = GaussianNB() >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> eclf1 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='hard', verbose=1) >>> eclf1 = eclf1.fit(X, y) >>> print(eclf1.predict(X)) [1 1 1 2 2 2] >>> eclf2 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft') >>> eclf2 = eclf2.fit(X, y) >>> print(eclf2.predict(X)) [1 1 1 2 2 2] >>> eclf3 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='soft', weights=[2,1,1]) >>> eclf3 = eclf3.fit(X, y) >>> print(eclf3.predict(X)) [1 1 1 2 2 2] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/EnsembleVoteClassifier/","title":"EnsembleVoteClassifier"},{"location":"api_modules/mlxtend.classifier/EnsembleVoteClassifier/#methods","text":"fit(X, y, sample_weight=None) Learn weight coefficients from training data for each classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weight to each classifier in the clfs list. Raises an error if some classifier does not support sample_weight in the fit() method.
Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict class labels for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns maj : array-like, shape = [n_samples] Predicted class labels. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns avg : array-like, shape = [n_samples, n_classes] Weighted average probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) with respect to y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Return class labels or probabilities for X for each estimator. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns If voting='soft' : array-like = [n_classifiers, n_samples, n_classes] Class probabilities calculated by each classifier. If voting='hard' : array-like = [n_classifiers, n_samples] Class labels predicted by each classifier.","title":"Methods"},{"location":"api_modules/mlxtend.classifier/LogisticRegression/","text":"LogisticRegression LogisticRegression(eta=0.01, epochs=50, l2_lambda=0.0, minibatches=1, random_seed=None, print_progress=0) Logistic regression classifier. Note that this implementation of Logistic Regression expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2_lambda : float Regularization parameter for L2 regularization. No regularization if l2_lambda=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr.
0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats with cross_entropy cost (sgd or gd) for every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/LogisticRegression/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class 1 probability : float score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause","title":"LogisticRegression"},{"location":"api_modules/mlxtend.classifier/LogisticRegression/#logisticregression","text":"LogisticRegression(eta=0.01, epochs=50, l2_lambda=0.0, minibatches=1, random_seed=None, print_progress=0) Logistic regression classifier. Note that this implementation of Logistic Regression expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2_lambda : float Regularization parameter for L2 regularization. No regularization if l2_lambda=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. 
If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats with cross_entropy cost (sgd or gd) for every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/LogisticRegression/","title":"LogisticRegression"},{"location":"api_modules/mlxtend.classifier/LogisticRegression/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_modules/mlxtend.classifier/LogisticRegression/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.classifier/LogisticRegression/#license-bsd-3-clause","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class 1 probability : float score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. 
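A hedged usage sketch of the LogisticRegression API above; the binary Iris subset and the standardization step are illustrative assumptions (minibatches=len(y) selects SGD online learning, as documented):

# LogisticRegression sketch: SGD fit on binary {0, 1} labels, then class-1 probabilities
from mlxtend.data import iris_data
from mlxtend.classifier import LogisticRegression

X, y = iris_data()
X, y = X[:100, [0, 3]], y[:100]               # binary labels in {0, 1}
X = (X - X.mean(axis=0)) / X.std(axis=0)

lr = LogisticRegression(eta=0.1, epochs=100, l2_lambda=0.0,
                        minibatches=len(y),   # SGD online learning
                        random_seed=1)
lr.fit(X, y)
print(lr.predict_proba(X)[:3])                # probability of class 1
print('Accuracy: %.2f' % lr.score(X, y))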
Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.classifier/LogisticRegression/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.classifier/LogisticRegression/#license-bsd-3-clause_1","text":"","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.classifier/MultiLayerPerceptron/","text":"MultiLayerPerceptron MultiLayerPerceptron(eta=0.5, epochs=50, hidden_layers=[50], n_classes=None, momentum=0.0, l1=0.0, l2=0.0, dropout=1.0, decrease_const=0.0, minibatches=1, random_seed=None, print_progress=0) Multi-layer perceptron classifier with logistic sigmoid activations Parameters eta : float (default: 0.5) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. hidden_layers : list (default: [50]) Number of units per hidden layer. By default 50 units in the first hidden layer. At the moment only 1 hidden layer is supported n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. l1 : float (default: 0.0) L1 regularization strength l2 : float (default: 0.0) L2 regularization strength momentum : float (default: 0.0) Momentum constant. Factor multiplied with the gradient of the previous epoch t-1 to improve learning speed w(t) := w(t) - (grad(t) + momentum * grad(t-1)) decrease_const : float (default: 0.0) Decrease constant. Shrinks the learning rate after each epoch via eta / (1 + epoch*decrease_const) minibatches : int (default: 1) Divide the training data into k minibatches for accelerated stochastic gradient descent learning. Gradient Descent Learning if minibatches = 1 Stochastic Gradient Descent learning if minibatches = len(y) Minibatch learning if minibatches > 1 random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape=[n_features, n_classes] Weights after fitting. b_ : 1D-array, shape=[n_classes] Bias units after fitting. cost_ : list List of floats; the mean categorical cross entropy cost after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/MultiLayerPerceptron/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. 
Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilties : array-like, shape= [n_samples, n_classes] score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause","title":"MultiLayerPerceptron"},{"location":"api_modules/mlxtend.classifier/MultiLayerPerceptron/#multilayerperceptron","text":"MultiLayerPerceptron(eta=0.5, epochs=50, hidden_layers=[50], n_classes=None, momentum=0.0, l1=0.0, l2=0.0, dropout=1.0, decrease_const=0.0, minibatches=1, random_seed=None, print_progress=0) Multi-layer perceptron classifier with logistic sigmoid activations Parameters eta : float (default: 0.5) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. hidden_layers : list (default: [50]) Number of units per hidden layer. By default 50 units in the first hidden layer. At the moment only 1 hidden layer is supported n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. l1 : float (default: 0.0) L1 regularization strength l2 : float (default: 0.0) L2 regularization strength momentum : float (default: 0.0) Momentum constant. Factor multiplied with the gradient of the previous epoch t-1 to improve learning speed w(t) := w(t) - (grad(t) + momentum * grad(t-1)) decrease_const : float (default: 0.0) Decrease constant. Shrinks the learning rate after each epoch via eta / (1 + epoch*decrease_const) minibatches : int (default: 1) Divide the training data into k minibatches for accelerated stochastic gradient descent learning. 
Gradient Descent Learning if minibatches = 1 Stochastic Gradient Descent learning if minibatches = len(y) Minibatch learning if minibatches > 1 random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape=[n_features, n_classes] Weights after fitting. b_ : 1D-array, shape=[n_classes] Bias units after fitting. cost_ : list List of floats; the mean categorical cross entropy cost after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/MultiLayerPerceptron/","title":"MultiLayerPerceptron"},{"location":"api_modules/mlxtend.classifier/MultiLayerPerceptron/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_modules/mlxtend.classifier/MultiLayerPerceptron/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.classifier/MultiLayerPerceptron/#license-bsd-3-clause","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilties : array-like, shape= [n_samples, n_classes] score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. 
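A minimal sketch of the MultiLayerPerceptron parameters documented above (a single hidden layer, as currently supported); the Iris data and the standardization step are illustrative assumptions:

# MultiLayerPerceptron sketch: one hidden layer, minibatch SGD
from mlxtend.data import iris_data
from mlxtend.classifier import MultiLayerPerceptron

X, y = iris_data()
X = (X - X.mean(axis=0)) / X.std(axis=0)      # standardize features

mlp = MultiLayerPerceptron(eta=0.5, epochs=50, hidden_layers=[50],
                           minibatches=10,    # 150 samples -> 10 minibatches
                           random_seed=1)
mlp.fit(X, y)
print('Accuracy: %.2f' % mlp.score(X, y))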
Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.classifier/MultiLayerPerceptron/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.classifier/MultiLayerPerceptron/#license-bsd-3-clause_1","text":"","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.classifier/Perceptron/","text":"Perceptron Perceptron(eta=0.1, epochs=50, random_seed=None, print_progress=0) Perceptron classifier. Note that this implementation of the Perceptron expects binary class labels in {0, 1}. Parameters eta : float (default: 0.1) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Number of passes over the training dataset. Prior to each epoch, the dataset is shuffled to prevent cycles. random_seed : int Random state for initializing random weights and shuffling. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Number of misclassifications in every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Perceptron/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. 
Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause","title":"Perceptron"},{"location":"api_modules/mlxtend.classifier/Perceptron/#perceptron","text":"Perceptron(eta=0.1, epochs=50, random_seed=None, print_progress=0) Perceptron classifier. Note that this implementation of the Perceptron expects binary class labels in {0, 1}. Parameters eta : float (default: 0.1) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Number of passes over the training dataset. Prior to each epoch, the dataset is shuffled to prevent cycles. random_seed : int Random state for initializing random weights and shuffling. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Number of misclassifications in every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Perceptron/","title":"Perceptron"},{"location":"api_modules/mlxtend.classifier/Perceptron/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_modules/mlxtend.classifier/Perceptron/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.classifier/Perceptron/#license-bsd-3-clause","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. 
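A minimal, hedged sketch of the Perceptron API above; the two-class Iris subset (labels {0, 1}, as this implementation expects) is an illustrative choice:

# Perceptron sketch: binary {0, 1} labels required
from mlxtend.data import iris_data
from mlxtend.classifier import Perceptron

X, y = iris_data()
X, y = X[:100, [0, 3]], y[:100]               # two linearly separable classes
X = (X - X.mean(axis=0)) / X.std(axis=0)

ppn = Perceptron(eta=0.1, epochs=40, random_seed=1)
ppn.fit(X, y)
print('Misclassifications per epoch:', ppn.cost_[:5])
print('Accuracy: %.2f' % ppn.score(X, y))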
Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.classifier/Perceptron/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.classifier/Perceptron/#license-bsd-3-clause_1","text":"","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.classifier/SoftmaxRegression/","text":"SoftmaxRegression SoftmaxRegression(eta=0.01, epochs=50, l2=0.0, minibatches=1, n_classes=None, random_seed=None, print_progress=0) Softmax regression classifier. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2 : float Regularization parameter for L2 regularization. No regularization if l2=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats, the average cross_entropy for each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/SoftmaxRegression/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. 
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilties : array-like, shape= [n_samples, n_classes] score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause","title":"SoftmaxRegression"},{"location":"api_modules/mlxtend.classifier/SoftmaxRegression/#softmaxregression","text":"SoftmaxRegression(eta=0.01, epochs=50, l2=0.0, minibatches=1, n_classes=None, random_seed=None, print_progress=0) Softmax regression classifier. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2 : float Regularization parameter for L2 regularization. No regularization if l2=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats, the average cross_entropy for each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/SoftmaxRegression/","title":"SoftmaxRegression"},{"location":"api_modules/mlxtend.classifier/SoftmaxRegression/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' 
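A hedged multi-class sketch of the SoftmaxRegression estimator documented above, assuming the three-class Iris data from mlxtend.data:

# SoftmaxRegression sketch: multi-class fit with minibatch SGD
from mlxtend.data import iris_data
from mlxtend.classifier import SoftmaxRegression

X, y = iris_data()                            # three classes {0, 1, 2}
X = (X - X.mean(axis=0)) / X.std(axis=0)

smr = SoftmaxRegression(eta=0.01, epochs=100, minibatches=5, random_seed=1)
smr.fit(X, y)
print(smr.predict_proba(X)[:2])               # shape = [n_samples, n_classes]
print('Accuracy: %.2f' % smr.score(X, y))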
adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_modules/mlxtend.classifier/SoftmaxRegression/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.classifier/SoftmaxRegression/#license-bsd-3-clause","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilties : array-like, shape= [n_samples, n_classes] score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.classifier/SoftmaxRegression/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.classifier/SoftmaxRegression/#license-bsd-3-clause_1","text":"","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.classifier/StackingCVClassifier/","text":"StackingCVClassifier StackingCVClassifier(classifiers, meta_classifier, use_probas=False, cv=2, use_features_in_secondary=False, stratify=True, shuffle=True, verbose=0, store_train_meta_features=False, use_clones=True) A 'Stacking Cross-Validation' classifier for scikit-learn estimators. New in mlxtend v0.4.3 Notes The StackingCVClassifier uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus NumPy's random seed need to be specified explicitely for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVClassifier Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the StackingCVClassifer will fit clones of these original classifiers that will be stored in the class attribute self.clfs_ . meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains meta-classifier based on predicted probabilities instead of class labels. 
cv : int, cross-validation generator or an iterable, optional (default: 2) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 2-fold cross validation, - integer, to specify the number of folds in a (Stratified)KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use either a KFold or StratifiedKFold cross validation depending on the value of the stratify argument. use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. stratify : bool (default: True) If True and the cv argument is an integer, it will follow a stratified K-fold cross-validation technique. If the cv argument is a specific cross-validation technique, this argument is ignored. shuffle : bool (default: True) If True and the cv argument is an integer, the training data will be shuffled at the fitting stage prior to cross-validation. If the cv argument is a specific cross-validation technique, this argument is ignored. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the classifier being fitted and which fold is currently being used for fitting - verbose=2 : Prints info about the parameters of the classifier being fitted - verbose>2 : Changes verbose param of the underlying classifier to self.verbose - 2 store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data, used for fitting the meta-classifier, are stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingCVClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classifiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingCVClassifier/ Methods fit(X, y, groups=None, sample_weight=None) Fit ensemble classifiers and the meta-classifier. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor.
Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] Predicted class labels. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self","title":"StackingCVClassifier"},{"location":"api_modules/mlxtend.classifier/StackingCVClassifier/#stackingcvclassifier","text":"StackingCVClassifier(classifiers, meta_classifier, use_probas=False, cv=2, use_features_in_secondary=False, stratify=True, shuffle=True, verbose=0, store_train_meta_features=False, use_clones=True) A 'Stacking Cross-Validation' classifier for scikit-learn estimators. New in mlxtend v0.4.3 Notes The StackingCVClassifier uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus NumPy's random seed need to be specified explicitely for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVClassifier Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the StackingCVClassifer will fit clones of these original classifiers that will be stored in the class attribute self.clfs_ . meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains meta-classifier based on predicted probabilities instead of class labels. 
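Per the note above that scikit-learn's check_cv accepts no random seed, a hedged sketch that seeds NumPy globally before fitting; the base and meta estimator choices are illustrative:

# StackingCVClassifier sketch: seed NumPy explicitly for deterministic folds
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.datasets import load_iris
from mlxtend.classifier import StackingCVClassifier

RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)                   # check_cv has no seed of its own

X, y = load_iris(return_X_y=True)
sclf = StackingCVClassifier(
    classifiers=[KNeighborsClassifier(), GaussianNB()],
    meta_classifier=LogisticRegression(random_state=RANDOM_SEED),
    use_probas=True, cv=2)
sclf.fit(X, y)
print('Accuracy: %.2f' % sclf.score(X, y))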
cv : int, cross-validation generator or an iterable, optional (default: 2) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 2-fold cross validation, - integer, to specify the number of folds in a (Stratified)KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use either a KFold or StratifiedKFold cross validation depending the value of stratify argument. use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. stratify : bool (default: True) If True, and the cv argument is integer it will follow a stratified K-Fold cross validation technique. If the cv argument is a specific cross validation technique, this argument is omitted. shuffle : bool (default: True) If True, and the cv argument is integer, the training data will be shuffled at fitting stage prior to cross-validation. If the cv argument is a specific cross validation technique, this argument is omitted. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the regressor being fitted and which fold is currently being used for fitting - verbose=2 : Prints info about the parameters of the regressor being fitted - verbose>2 : Changes verbose param of the underlying regressor to self.verbose - 2 store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingCVClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that are supporting the scikit-learn fit/predict API interface but are not compatible to scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classfiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingCVClassifier/","title":"StackingCVClassifier"},{"location":"api_modules/mlxtend.classifier/StackingCVClassifier/#methods","text":"fit(X, y, groups=None, sample_weight=None) Fit ensemble classifers and the meta-classifier. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. 
This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] Predicted class labels. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self","title":"Methods"},{"location":"api_modules/mlxtend.classifier/StackingClassifier/","text":"StackingClassifier StackingClassifier(classifiers, meta_classifier, use_probas=False, average_probas=False, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, use_clones=True) A Stacking classifier for scikit-learn estimators for classification. Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the StackingClassifer will fit clones of these original classifiers that will be stored in the class attribute self.clfs_ . meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains meta-classifier based on predicted probabilities instead of class labels. average_probas : bool (default: False) Averages the probabilities as meta features if True. verbose : int, optional (default=0) Controls the verbosity of the building process. 
- verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the classifier being fitted - verbose=2 : Prints info about the parameters of the classifier being fitted - verbose>2 : Changes the verbose param of the underlying classifier to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier are stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classifiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingClassifier/
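As a brief sketch of the meta-feature layout (illustrative estimator choices; the shapes follow the predict_meta_features description below):

>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.tree import DecisionTreeClassifier
>>> from mlxtend.classifier import StackingClassifier
>>> from mlxtend.data import iris_data
>>> X, y = iris_data()
>>> sclf = StackingClassifier(classifiers=[DecisionTreeClassifier(),
...                                        GaussianNB()],
...                           meta_classifier=LogisticRegression())
>>> sclf = sclf.fit(X, y)
>>> sclf.predict_meta_features(X).shape  # one column per base classifier
(150, 2)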
Methods fit(X, y, sample_weight=None) Fit ensemble classifiers and the meta-classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_outputs] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weight to each classifier in the classifiers list as well as to the meta_classifier. Raises an error if any classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits the transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] or [n_samples, n_outputs] Predicted class labels. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] or a list of n_outputs of such arrays if n_outputs > 1. Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"StackingClassifier"},{"location":"api_modules/mlxtend.classifier/StackingClassifier/#stackingclassifier","text":"StackingClassifier(classifiers, meta_classifier, use_probas=False, average_probas=False, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, use_clones=True) A Stacking classifier for scikit-learn estimators for classification. Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the StackingClassifier will fit clones of these original classifiers that will be stored in the class attribute self.clfs_ . meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains the meta-classifier based on predicted probabilities instead of class labels. average_probas : bool (default: False) Averages the probabilities as meta features if True. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the classifier being fitted - verbose=2 : Prints info about the parameters of the classifier being fitted - verbose>2 : Changes the verbose param of the underlying classifier to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier are stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function.
Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classifiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingClassifier/","title":"StackingClassifier"},{"location":"api_modules/mlxtend.classifier/StackingClassifier/#methods","text":"fit(X, y, sample_weight=None) Fit ensemble classifiers and the meta-classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_outputs] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weight to each classifier in the classifiers list as well as to the meta_classifier. Raises an error if any classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits the transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] or [n_samples, n_outputs] Predicted class labels. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] or a list of n_outputs of such arrays if n_outputs > 1. Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self
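As an illustration of this nested-parameter convention (reusing the hypothetical sclf from the sketch above, whose meta-classifier is a LogisticRegression):

>>> sclf = sclf.set_params(meta_classifier__C=10.0)
>>> sclf.get_params()['meta_classifier__C']
10.0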
Returns self","title":"Methods"},{"location":"api_modules/mlxtend.cluster/Kmeans/","text":"Kmeans Kmeans(k, max_iter=10, convergence_tolerance=1e-05, random_seed=None, print_progress=0) K-means clustering class. Added in 0.4.1dev Parameters k : int Number of clusters max_iter : int (default: 10) Number of iterations during cluster assignment. Cluster re-assignment stops automatically when the algorithm converged. convergence_tolerance : float (default: 1e-05) Compares current centroids with centroids of the previous iteration using the given tolerance (a small positive float)to determine if the algorithm converged early. random_seed : int (default: None) Set random state for the initial centroid assignment. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Iterations elapsed 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes centroids_ : 2d-array, shape={k, n_features} Feature values of the k cluster centroids. custers_ : dictionary The cluster assignments stored as a Python dictionary; the dictionary keys denote the cluster indeces and the items are Python lists of the sample indices that were assigned to each cluster. iterations_ : int Number of iterations until convergence. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Kmeans/ Methods fit(X, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause","title":"Kmeans"},{"location":"api_modules/mlxtend.cluster/Kmeans/#kmeans","text":"Kmeans(k, max_iter=10, convergence_tolerance=1e-05, random_seed=None, print_progress=0) K-means clustering class. Added in 0.4.1dev Parameters k : int Number of clusters max_iter : int (default: 10) Number of iterations during cluster assignment. Cluster re-assignment stops automatically when the algorithm converged. 
Methods fit(X, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause","title":"Kmeans"},{"location":"api_modules/mlxtend.cluster/Kmeans/#kmeans","text":"Kmeans(k, max_iter=10, convergence_tolerance=1e-05, random_seed=None, print_progress=0) K-means clustering class. Added in 0.4.1dev Parameters k : int Number of clusters max_iter : int (default: 10) Number of iterations during cluster assignment. Cluster re-assignment stops automatically when the algorithm has converged. convergence_tolerance : float (default: 1e-05) Compares current centroids with centroids of the previous iteration using the given tolerance (a small positive float) to determine if the algorithm converged early. random_seed : int (default: None) Set random state for the initial centroid assignment. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Iterations elapsed 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes centroids_ : 2d-array, shape={k, n_features} Feature values of the k cluster centroids. clusters_ : dictionary The cluster assignments stored as a Python dictionary; the dictionary keys denote the cluster indices and the items are Python lists of the sample indices that were assigned to each cluster. iterations_ : int Number of iterations until convergence. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Kmeans/","title":"Kmeans"},{"location":"api_modules/mlxtend.cluster/Kmeans/#methods","text":"fit(X, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_modules/mlxtend.cluster/Kmeans/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.cluster/Kmeans/#license-bsd-3-clause","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.cluster/Kmeans/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.cluster/Kmeans/#license-bsd-3-clause_1","text":"","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.data/autompg_data/","text":"autompg_data autompg_data() Auto MPG dataset.
Source : https://archive.ics.uci.edu/ml/datasets/Auto+MPG Number of samples : 392 Continuous target variable : mpg Dataset Attributes: 1) cylinders: multi-valued discrete 2) displacement: continuous 3) horsepower: continuous 4) weight: continuous 5) acceleration: continuous 6) model year: multi-valued discrete 7) origin: multi-valued discrete 8) car name: string (unique for each instance) Returns X, y : [n_samples, n_features], [n_targets] X is the feature matrix with 392 auto samples as rows and 8 feature columns (6 rows with NaNs removed). y is a 1-dimensional array of the target MPG values. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/autompg_data/","title":"Autompg data"},{"location":"api_modules/mlxtend.data/autompg_data/#autompg_data","text":"autompg_data() Auto MPG dataset. Source : https://archive.ics.uci.edu/ml/datasets/Auto+MPG Number of samples : 392 Continuous target variable : mpg Dataset Attributes: 1) cylinders: multi-valued discrete 2) displacement: continuous 3) horsepower: continuous 4) weight: continuous 5) acceleration: continuous 6) model year: multi-valued discrete 7) origin: multi-valued discrete 8) car name: string (unique for each instance) Returns X, y : [n_samples, n_features], [n_targets] X is the feature matrix with 392 auto samples as rows and 8 feature columns (6 rows with NaNs removed). y is a 1-dimensional array of the target MPG values. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/autompg_data/","title":"autompg_data"},{"location":"api_modules/mlxtend.data/boston_housing_data/","text":"boston_housing_data boston_housing_data() Boston Housing dataset. Source : https://archive.ics.uci.edu/ml/datasets/Housing Number of samples : 506 Continuous target variable : MEDV MEDV = Median value of owner-occupied homes in $1000's Dataset Attributes: 1) CRIM per capita crime rate by town 2) ZN proportion of residential land zoned for lots over 25,000 sq.ft. 3) INDUS proportion of non-retail business acres per town 4) CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) 5) NOX nitric oxides concentration (parts per 10 million) 6) RM average number of rooms per dwelling 7) AGE proportion of owner-occupied units built prior to 1940 8) DIS weighted distances to five Boston employment centres 9) RAD index of accessibility to radial highways 10) TAX full-value property-tax rate per $10,000 11) PTRATIO pupil-teacher ratio by town 12) B 1000(Bk - 0.63)^2 where Bk is the prop. of b. by town 13) LSTAT % lower status of the population Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 506 housing samples as rows and 13 feature columns. y is a 1-dimensional array of the continuous target variable MEDV Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/boston_housing_data/","title":"Boston housing data"},{"location":"api_modules/mlxtend.data/boston_housing_data/#boston_housing_data","text":"boston_housing_data() Boston Housing dataset. Source : https://archive.ics.uci.edu/ml/datasets/Housing Number of samples : 506 Continuous target variable : MEDV MEDV = Median value of owner-occupied homes in $1000's Dataset Attributes: 1) CRIM per capita crime rate by town 2) ZN proportion of residential land zoned for lots over 25,000 sq.ft. 
3) INDUS proportion of non-retail business acres per town 4) CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) 5) NOX nitric oxides concentration (parts per 10 million) 6) RM average number of rooms per dwelling 7) AGE proportion of owner-occupied units built prior to 1940 8) DIS weighted distances to five Boston employment centres 9) RAD index of accessibility to radial highways 10) TAX full-value property-tax rate per $10,000 11) PTRATIO pupil-teacher ratio by town 12) B 1000(Bk - 0.63)^2 where Bk is the prop. of b. by town 13) LSTAT % lower status of the population Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 506 housing samples as rows and 13 feature columns. y is a 1-dimensional array of the continuous target variable MEDV Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/boston_housing_data/","title":"boston_housing_data"},{"location":"api_modules/mlxtend.data/iris_data/","text":"iris_data iris_data() Iris flower dataset. Source : https://archive.ics.uci.edu/ml/datasets/Iris Number of samples : 150 Class labels : {0, 1, 2}, distribution: [50, 50, 50] 0 = setosa, 1 = versicolor, 2 = virginica. Dataset Attributes: 1) sepal length [cm] 2) sepal width [cm] 3) petal length [cm] 4) petal width [cm] Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 150 flower samples as rows, and 4 feature columns sepal length, sepal width, petal length, and petal width. y is a 1-dimensional array of the class labels {0, 1, 2} Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/iris_data/","title":"Iris data"},{"location":"api_modules/mlxtend.data/iris_data/#iris_data","text":"iris_data() Iris flower dataset. Source : https://archive.ics.uci.edu/ml/datasets/Iris Number of samples : 150 Class labels : {0, 1, 2}, distribution: [50, 50, 50] 0 = setosa, 1 = versicolor, 2 = virginica. Dataset Attributes: 1) sepal length [cm] 2) sepal width [cm] 3) petal length [cm] 4) petal width [cm] Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 150 flower samples as rows, and 4 feature columns sepal length, sepal width, petal length, and petal width. y is a 1-dimensional array of the class labels {0, 1, 2} Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/iris_data/","title":"iris_data"},{"location":"api_modules/mlxtend.data/loadlocal_mnist/","text":"loadlocal_mnist loadlocal_mnist(images_path, labels_path) Read MNIST from ubyte files. Parameters images_path : str path to the test or train MNIST ubyte file labels_path : str path to the test or train MNIST class labels file Returns images : [n_samples, n_pixels] numpy.array Pixel values of the images. labels : [n_samples] numpy array Target class labels Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/loadlocal_mnist/","title":"Loadlocal mnist"},{"location":"api_modules/mlxtend.data/loadlocal_mnist/#loadlocal_mnist","text":"loadlocal_mnist(images_path, labels_path) Read MNIST from ubyte files. Parameters images_path : str path to the test or train MNIST ubyte file labels_path : str path to the test or train MNIST class labels file Returns images : [n_samples, n_pixels] numpy.array Pixel values of the images. 
labels : [n_samples] numpy array Target class labels Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/loadlocal_mnist/","title":"loadlocal_mnist"},{"location":"api_modules/mlxtend.data/make_multiplexer_dataset/","text":"make_multiplexer_dataset make_multiplexer_dataset(address_bits=2, sample_size=100, positive_class_ratio=0.5, shuffle=False, random_seed=None) Function to create a binary n-bit multiplexer dataset. New in mlxtend v0.9 Parameters address_bits : int (default: 2) A positive integer that determines the number of address bits in the multiplexer, which in turn determines the n-bit capacity of the multiplexer and therefore the number of features. For example, 2 address bits will result in a 6 bit multiplexer and consequently 6 features (2 + 2^2 = 6). If address_bits=3 , then this results in an 11-bit multiplexer as (2 + 2^3 = 11) with 11 features. sample_size : int (default: 100) The total number of samples generated. positive_class_ratio : float (default: 0.5) The fraction (a float between 0 and 1) of samples in the dataset of sample_size examples that have class label 1. If positive_class_ratio=0.5 (default), then the ratio of class 0 and class 1 samples is perfectly balanced. shuffle : bool (default: False) Whether or not to shuffle the features and labels. If False (default), the samples are returned in sorted order starting with sample_size/2 samples with class label 0 and followed by sample_size/2 samples with class label 1. random_seed : int (default: None) Random seed used for generating the multiplexer samples and shuffling. Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with the number of samples equal to sample_size . The number of features is determined by the number of address bits. For instance, 2 address bits will result in a 6 bit multiplexer and consequently 6 features (2 + 2^2 = 6). All features are binary (values in {0, 1}). y is a 1-dimensional array of class labels in {0, 1}. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/make_multiplexer_dataset
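A short sketch of the address-bit arithmetic above (2 address bits -> 2 + 2^2 = 6 binary features; the parameter values here are illustrative):

>>> from mlxtend.data import make_multiplexer_dataset
>>> X, y = make_multiplexer_dataset(address_bits=2, sample_size=10,
...                                 positive_class_ratio=0.5, random_seed=0)
>>> X.shape, y.shape
((10, 6), (10,))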
","title":"Make multiplexer dataset"},{"location":"api_modules/mlxtend.data/make_multiplexer_dataset/#make_multiplexer_dataset","text":"make_multiplexer_dataset(address_bits=2, sample_size=100, positive_class_ratio=0.5, shuffle=False, random_seed=None) Function to create a binary n-bit multiplexer dataset. New in mlxtend v0.9 Parameters address_bits : int (default: 2) A positive integer that determines the number of address bits in the multiplexer, which in turn determines the n-bit capacity of the multiplexer and therefore the number of features. For example, 2 address bits will result in a 6 bit multiplexer and consequently 6 features (2 + 2^2 = 6). If address_bits=3 , then this results in an 11-bit multiplexer as (2 + 2^3 = 11) with 11 features. sample_size : int (default: 100) The total number of samples generated. positive_class_ratio : float (default: 0.5) The fraction (a float between 0 and 1) of samples in the dataset of sample_size examples that have class label 1. If positive_class_ratio=0.5 (default), then the ratio of class 0 and class 1 samples is perfectly balanced. shuffle : bool (default: False) Whether or not to shuffle the features and labels. If False (default), the samples are returned in sorted order starting with sample_size/2 samples with class label 0 and followed by sample_size/2 samples with class label 1. random_seed : int (default: None) Random seed used for generating the multiplexer samples and shuffling. Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with the number of samples equal to sample_size . The number of features is determined by the number of address bits. For instance, 2 address bits will result in a 6 bit multiplexer and consequently 6 features (2 + 2^2 = 6). All features are binary (values in {0, 1}). y is a 1-dimensional array of class labels in {0, 1}. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/make_multiplexer_dataset","title":"make_multiplexer_dataset"},{"location":"api_modules/mlxtend.data/mnist_data/","text":"mnist_data mnist_data() 5000 samples from the MNIST handwritten digits dataset. Data Source : http://yann.lecun.com/exdb/mnist/ Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 5000 image samples as rows, each row consists of 28x28 pixels that were unrolled into 784 pixel feature vectors. y contains the 10 unique class labels 0-9. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/mnist_data/","title":"Mnist data"},{"location":"api_modules/mlxtend.data/mnist_data/#mnist_data","text":"mnist_data() 5000 samples from the MNIST handwritten digits dataset. Data Source : http://yann.lecun.com/exdb/mnist/ Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 5000 image samples as rows, each row consists of 28x28 pixels that were unrolled into 784 pixel feature vectors. y contains the 10 unique class labels 0-9. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/mnist_data/","title":"mnist_data"},{"location":"api_modules/mlxtend.data/three_blobs_data/","text":"three_blobs_data three_blobs_data() A random dataset of 3 2D blobs for clustering. Number of samples : 150 Suggested labels : {0, 1, 2}, distribution: [50, 50, 50] Returns X, y : [n_samples, n_features], [n_cluster_labels] X is the feature matrix with 150 samples as rows and 2 feature columns. y is a 1-dimensional array of the 3 suggested cluster labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/three_blobs_data","title":"Three blobs data"},{"location":"api_modules/mlxtend.data/three_blobs_data/#three_blobs_data","text":"three_blobs_data() A random dataset of 3 2D blobs for clustering. Number of samples : 150 Suggested labels : {0, 1, 2}, distribution: [50, 50, 50] Returns X, y : [n_samples, n_features], [n_cluster_labels] X is the feature matrix with 150 samples as rows and 2 feature columns. y is a 1-dimensional array of the 3 suggested cluster labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/three_blobs_data","title":"three_blobs_data"},{"location":"api_modules/mlxtend.data/wine_data/","text":"wine_data wine_data() Wine dataset.
Source : https://archive.ics.uci.edu/ml/datasets/Wine Number of samples : 178 Class labels : {0, 1, 2}, distribution: [59, 71, 48] Dataset Attributes: 1) Alcohol 2) Malic acid 3) Ash 4) Alcalinity of ash 5) Magnesium 6) Total phenols 7) Flavanoids 8) Nonflavanoid phenols 9) Proanthocyanins 10) Color intensity 11) Hue 12) OD280/OD315 of diluted wines 13) Proline Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 178 wine samples as rows and 13 feature columns. y is a 1-dimensional array of the 3 class labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/wine_data","title":"Wine data"},{"location":"api_modules/mlxtend.data/wine_data/#wine_data","text":"wine_data() Wine dataset. Source : https://archive.ics.uci.edu/ml/datasets/Wine Number of samples : 178 Class labels : {0, 1, 2}, distribution: [59, 71, 48] Dataset Attributes: 1) Alcohol 2) Malic acid 3) Ash 4) Alcalinity of ash 5) Magnesium 6) Total phenols 7) Flavanoids 8) Nonflavanoid phenols 9) Proanthocyanins 10) Color intensity 11) Hue 12) OD280/OD315 of diluted wines 13) Proline Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 178 wine samples as rows and 13 feature columns. y is a 1-dimensional array of the 3 class labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/wine_data","title":"wine_data"},{"location":"api_modules/mlxtend.evaluate/BootstrapOutOfBag/","text":"BootstrapOutOfBag BootstrapOutOfBag(n_splits=200, random_seed=None) Parameters n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. Returns train_idx : ndarray The training set indices for that split. test_idx : ndarray The testing set indices for that split. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/BootstrapOutOfBag/ Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility with scikit-learn. y : object Always ignored, exists for compatibility with scikit-learn. groups : object Always ignored, exists for compatibility with scikit-learn. Returns n_splits : int Returns the number of splitting iterations in the cross-validator. split(X, y=None, groups=None) y : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn. groups : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn.","title":"BootstrapOutOfBag"},{"location":"api_modules/mlxtend.evaluate/BootstrapOutOfBag/#bootstrapoutofbag","text":"BootstrapOutOfBag(n_splits=200, random_seed=None) Parameters n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. Returns train_idx : ndarray The training set indices for that split. test_idx : ndarray The testing set indices for that split. 
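For instance, a minimal sketch on a hypothetical five-sample dataset (each bootstrap training set is drawn with replacement, so the out-of-bag test set consists of the indices that were not sampled, and together they always cover the full index range):

>>> import numpy as np
>>> from mlxtend.evaluate import BootstrapOutOfBag
>>> oob = BootstrapOutOfBag(n_splits=3, random_seed=1)
>>> for train_idx, test_idx in oob.split(np.arange(5)):
...     print(sorted(set(train_idx) | set(test_idx)) == [0, 1, 2, 3, 4])
True
True
True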
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/BootstrapOutOfBag/","title":"BootstrapOutOfBag"},{"location":"api_modules/mlxtend.evaluate/BootstrapOutOfBag/#methods","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility with scikit-learn. y : object Always ignored, exists for compatibility with scikit-learn. groups : object Always ignored, exists for compatibility with scikit-learn. Returns n_splits : int Returns the number of splitting iterations in the cross-validator. split(X, y=None, groups=None) y : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn. groups : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn.","title":"Methods"},{"location":"api_modules/mlxtend.evaluate/PredefinedHoldoutSplit/","text":"PredefinedHoldoutSplit PredefinedHoldoutSplit(valid_indices) Train/Validation set splitter for sklearn's GridSearchCV etc. Uses user-specified train/validation set indices to split a dataset into train/validation sets. Parameters valid_indices : array-like, shape (num_examples,) Indices of the training examples in the training set to be used for validation. All other indices in the training set are used for a training subset for model fitting. Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.","title":"PredefinedHoldoutSplit"},{"location":"api_modules/mlxtend.evaluate/PredefinedHoldoutSplit/#predefinedholdoutsplit","text":"PredefinedHoldoutSplit(valid_indices) Train/Validation set splitter for sklearn's GridSearchCV etc. Uses user-specified train/validation set indices to split a dataset into train/validation sets. Parameters valid_indices : array-like, shape (num_examples,) Indices of the training examples in the training set to be used for validation. All other indices in the training set are used for a training subset for model fitting.","title":"PredefinedHoldoutSplit"},{"location":"api_modules/mlxtend.evaluate/PredefinedHoldoutSplit/#methods","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility.
Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.","title":"Methods"},{"location":"api_modules/mlxtend.evaluate/RandomHoldoutSplit/","text":"RandomHoldoutSplit RandomHoldoutSplit(valid_size=0.5, random_seed=None, stratify=False) Train/Validation set splitter for sklearn's GridSearchCV etc. Provides train/validation set indices to split a dataset into train/validation sets using random indices. Parameters valid_size : float (default: 0.5) Proportion of examples that are assigned as validation examples. The remaining proportion, 1 - valid_size, will automatically be assigned as training set examples. random_seed : int (default: None) The random seed for splitting the data into training and validation set partitions. stratify : bool (default: False) Whether to perform a stratified split or not Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of training examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.
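In the meantime, a minimal sketch (hypothetical estimator and parameter grid; any sklearn-compatible search should work, since the splitter follows the cv interface):

>>> from sklearn.neighbors import KNeighborsClassifier
>>> from sklearn.model_selection import GridSearchCV
>>> from mlxtend.evaluate import RandomHoldoutSplit
>>> from mlxtend.data import iris_data
>>> X, y = iris_data()
>>> split = RandomHoldoutSplit(valid_size=0.3, random_seed=1)
>>> gs = GridSearchCV(KNeighborsClassifier(),
...                   param_grid={'n_neighbors': [1, 3, 5]},
...                   cv=split)
>>> gs = gs.fit(X, y)
>>> gs.n_splits_  # a single random train/validation split
1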
","title":"RandomHoldoutSplit"},{"location":"api_modules/mlxtend.evaluate/RandomHoldoutSplit/#randomholdoutsplit","text":"RandomHoldoutSplit(valid_size=0.5, random_seed=None, stratify=False) Train/Validation set splitter for sklearn's GridSearchCV etc. Provides train/validation set indices to split a dataset into train/validation sets using random indices. Parameters valid_size : float (default: 0.5) Proportion of examples that are assigned as validation examples. The remaining proportion, 1 - valid_size, will automatically be assigned as training set examples. random_seed : int (default: None) The random seed for splitting the data into training and validation set partitions. stratify : bool (default: False) Whether to perform a stratified split or not","title":"RandomHoldoutSplit"},{"location":"api_modules/mlxtend.evaluate/RandomHoldoutSplit/#methods","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of training examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.","title":"Methods"},{"location":"api_modules/mlxtend.evaluate/bootstrap/","text":"bootstrap bootstrap(x, func, num_rounds=1000, ci=0.95, ddof=1, seed=None) Implements the ordinary nonparametric bootstrap Parameters x : NumPy array, shape=(n_samples, [n_columns]) A one- or multi-dimensional array of data records func : A function which computes a statistic that is used to compute the bootstrap replicates (the statistic computed from the bootstrap samples). This function must return a scalar value. For example, np.mean or np.median would be an acceptable argument for func if x is a 1-dimensional array or vector. num_rounds : int (default=1000) The number of bootstrap samples to draw, where each bootstrap sample has the same number of records as the original dataset. ci : float (default=0.95) A float in the range (0, 1) that represents the confidence level for computing the confidence interval. For example, ci=0.95 (default) will compute the 95% confidence interval from the bootstrap replicates. ddof : int The delta degrees of freedom used when computing the standard error. seed : int or None (default=None) Random seed for generating bootstrap samples. Returns original, standard_error, (lower_ci, upper_ci) : tuple Returns the statistic of the original sample ( original ), the standard error of the estimate, and the respective confidence interval bounds. Examples >>> import numpy as np >>> from mlxtend.evaluate import bootstrap >>> rng = np.random.RandomState(123) >>> x = rng.normal(loc=5., size=100) >>> original, std_err, ci_bounds = bootstrap(x, ... num_rounds=1000, ... func=np.mean, ... ci=0.95, ... seed=123) >>> print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, ... std_err, ... ci_bounds[0], ... ci_bounds[1])) Mean: 5.03, SE: +/- 0.11, CI95: [4.80, 5.26] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap/","title":"Bootstrap"},{"location":"api_modules/mlxtend.evaluate/bootstrap/#bootstrap","text":"bootstrap(x, func, num_rounds=1000, ci=0.95, ddof=1, seed=None) Implements the ordinary nonparametric bootstrap Parameters x : NumPy array, shape=(n_samples, [n_columns]) A one- or multi-dimensional array of data records func : A function which computes a statistic that is used to compute the bootstrap replicates (the statistic computed from the bootstrap samples). This function must return a scalar value. For example, np.mean or np.median would be an acceptable argument for func if x is a 1-dimensional array or vector. num_rounds : int (default=1000) The number of bootstrap samples to draw, where each bootstrap sample has the same number of records as the original dataset. ci : float (default=0.95) A float in the range (0, 1) that represents the confidence level for computing the confidence interval.
For example, ci=0.95 (default) will compute the 95% confidence interval from the bootstrap replicates. ddof : int The delta degrees of freedom used when computing the standard error. seed : int or None (default=None) Random seed for generating bootstrap samples. Returns original, standard_error, (lower_ci, upper_ci) : tuple Returns the statistic of the original sample ( original ), the standard error of the estimate, and the respective confidence interval bounds. Examples >>> import numpy as np >>> from mlxtend.evaluate import bootstrap >>> rng = np.random.RandomState(123) >>> x = rng.normal(loc=5., size=100) >>> original, std_err, ci_bounds = bootstrap(x, ... num_rounds=1000, ... func=np.mean, ... ci=0.95, ... seed=123) >>> print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, ... std_err, ... ci_bounds[0], ... ci_bounds[1])) Mean: 5.03, SE: +/- 0.11, CI95: [4.80, 5.26] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap/","title":"bootstrap"},{"location":"api_modules/mlxtend.evaluate/bootstrap_point632_score/","text":"bootstrap_point632_score bootstrap_point632_score(estimator, X, y, n_splits=200, method='.632', scoring_func=None, random_seed=None, clone_estimator=True) Implementation of the .632 [1] and .632+ [2] bootstrap for supervised learning References: [1] Efron, Bradley. 1983. "Estimating the Error Rate of a Prediction Rule: Improvement on Cross-Validation." Journal of the American Statistical Association 78 (382): 316. doi:10.2307/2288636. [2] Efron, Bradley, and Robert Tibshirani. 1997. "Improvements on Cross-Validation: The .632+ Bootstrap Method." Journal of the American Statistical Association 92 (438): 548. doi:10.2307/2965703. Parameters estimator : object An estimator for classification or regression that follows the scikit-learn API and implements "fit" and "predict" methods. X : array-like The data to fit. Can be, for example, a list or an array of at least two dimensions. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. method : str (default='.632') The bootstrap method, which can be either - 1) '.632' bootstrap (default) - 2) '.632+' bootstrap - 3) 'oob' (regular out-of-bag, no weighting) for comparison studies. scoring_func : callable Score function (or loss function) with signature scoring_func(y, y_pred, **kwargs) . If None, uses classification accuracy if the estimator is a classifier and mean squared error if the estimator is a regressor. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. clone_estimator : bool (default=True) Clones the estimator if true, otherwise fits the original. Returns scores : array of float, shape=(n_splits,) Array of scores of the estimator for each bootstrap replicate.
Examples >>> import numpy as np >>> from sklearn import datasets, linear_model >>> from mlxtend.evaluate import bootstrap_point632_score >>> iris = datasets.load_iris() >>> X = iris.data >>> y = iris.target >>> lr = linear_model.LogisticRegression() >>> scores = bootstrap_point632_score(lr, X, y) >>> acc = np.mean(scores) >>> print('Accuracy:', acc) Accuracy: 0.953023146884 >>> lower = np.percentile(scores, 2.5) >>> upper = np.percentile(scores, 97.5) >>> print('95%% Confidence interval: [%.2f, %.2f]' % (lower, upper)) 95% Confidence interval: [0.90, 0.98] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap_point632_score/","title":"Bootstrap point632 score"},{"location":"api_modules/mlxtend.evaluate/bootstrap_point632_score/#bootstrap_point632_score","text":"bootstrap_point632_score(estimator, X, y, n_splits=200, method='.632', scoring_func=None, random_seed=None, clone_estimator=True) Implementation of the .632 [1] and .632+ [2] bootstrap for supervised learning References: [1] Efron, Bradley. 1983. "Estimating the Error Rate of a Prediction Rule: Improvement on Cross-Validation." Journal of the American Statistical Association 78 (382): 316. doi:10.2307/2288636. [2] Efron, Bradley, and Robert Tibshirani. 1997. "Improvements on Cross-Validation: The .632+ Bootstrap Method." Journal of the American Statistical Association 92 (438): 548. doi:10.2307/2965703. Parameters estimator : object An estimator for classification or regression that follows the scikit-learn API and implements "fit" and "predict" methods. X : array-like The data to fit. Can be, for example, a list or an array of at least two dimensions. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. method : str (default='.632') The bootstrap method, which can be either - 1) '.632' bootstrap (default) - 2) '.632+' bootstrap - 3) 'oob' (regular out-of-bag, no weighting) for comparison studies. scoring_func : callable Score function (or loss function) with signature scoring_func(y, y_pred, **kwargs) . If None, uses classification accuracy if the estimator is a classifier and mean squared error if the estimator is a regressor. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. clone_estimator : bool (default=True) Clones the estimator if true, otherwise fits the original. Returns scores : array of float, shape=(n_splits,) Array of scores of the estimator for each bootstrap replicate. Examples >>> import numpy as np >>> from sklearn import datasets, linear_model >>> from mlxtend.evaluate import bootstrap_point632_score >>> iris = datasets.load_iris() >>> X = iris.data >>> y = iris.target >>> lr = linear_model.LogisticRegression() >>> scores = bootstrap_point632_score(lr, X, y) >>> acc = np.mean(scores) >>> print('Accuracy:', acc) Accuracy: 0.953023146884 >>> lower = np.percentile(scores, 2.5) >>> upper = np.percentile(scores, 97.5) >>> print('95%% Confidence interval: [%.2f, %.2f]' % (lower, upper)) 95% Confidence interval: [0.90, 0.98] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap_point632_score/","title":"bootstrap_point632_score"},{"location":"api_modules/mlxtend.evaluate/cochrans_q/","text":"cochrans_q cochrans_q(y_target, *y_model_predictions) Cochran's Q test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array.
*y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array. Returns q, p : float or None, float Returns the Q (chi-squared) value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/cochrans_q/","title":"Cochrans q"},{"location":"api_modules/mlxtend.evaluate/cochrans_q/#cochrans_q","text":"cochrans_q(y_target, *y_model_predictions) Cochran's Q test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array. Returns q, p : float or None, float Returns the Q (chi-squared) value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/cochrans_q/","title":"cochrans_q"},
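A brief numeric sketch (three hypothetical prediction arrays scored against the same targets; only the types of the return values are asserted here):

>>> import numpy as np
>>> from mlxtend.evaluate import cochrans_q
>>> y_true = np.array([0, 0, 0, 1, 1, 1])
>>> m1 = np.array([0, 0, 0, 1, 1, 1])
>>> m2 = np.array([0, 0, 0, 1, 1, 0])
>>> m3 = np.array([1, 0, 0, 1, 1, 0])
>>> q, p = cochrans_q(y_true, m1, m2, m3)
>>> 0.0 <= p <= 1.0
True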
{"location":"api_modules/mlxtend.evaluate/combined_ftest_5x2cv/","text":"combined_ftest_5x2cv combined_ftest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv combined F test proposed by Alpaydin 1999, to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns f : float The F-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/combined_ftest_5x2cv/","title":"Combined ftest 5x2cv"},{"location":"api_modules/mlxtend.evaluate/combined_ftest_5x2cv/#combined_ftest_5x2cv","text":"combined_ftest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv combined F test proposed by Alpaydin 1999, to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns f : float The F-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/combined_ftest_5x2cv/","title":"combined_ftest_5x2cv"},{"location":"api_modules/mlxtend.evaluate/confusion_matrix/","text":"confusion_matrix confusion_matrix(y_target, y_predicted, binary=False, positive_label=1) Compute a confusion matrix/contingency table. Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: False) Maps a multi-class problem onto a binary confusion matrix, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns mat : array-like, shape=[n_classes, n_classes] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/confusion_matrix/","title":"Confusion matrix"},{"location":"api_modules/mlxtend.evaluate/confusion_matrix/#confusion_matrix","text":"confusion_matrix(y_target, y_predicted, binary=False, positive_label=1) Compute a confusion matrix/contingency table. Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: False) Maps a multi-class problem onto a binary confusion matrix, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns mat : array-like, shape=[n_classes, n_classes] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/confusion_matrix/","title":"confusion_matrix"},{"location":"api_modules/mlxtend.evaluate/feature_importance_permutation/","text":"feature_importance_permutation feature_importance_permutation(X, y, predict_method, metric, num_rounds=1, seed=None) Feature importance estimation via permutation importance Parameters X : NumPy array, shape = [n_samples, n_features] Dataset, where n_samples is the number of samples and n_features is the number of features. y : NumPy array, shape = [n_samples] Target values. predict_method : prediction function A callable function that predicts the target values from X. metric : str, callable The metric for evaluating the feature importance through permutation. By default, the string 'accuracy' is recommended for classifiers and the string 'r2' for regressors. Optionally, a custom scoring function (e.g., metric=scoring_func ) can be used that accepts two arguments, y_true and y_pred, which have a similar shape to the y array. num_rounds : int (default=1) Number of rounds the feature columns are permuted to compute the permutation importance.
seed : int or None (default=None) Random seed for permuting the feature columns. Returns mean_importance_vals, all_importance_vals : NumPy arrays. The first array, mean_importance_vals, has shape [n_features, ] and contains the importance values for all features. The shape of the second array is [n_features, num_rounds] and contains the feature importance for each repetition. If num_rounds=1, it contains the same values as the first array, mean_importance_vals. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/feature_importance_permutation/","title":"Feature importance permutation"},{"location":"api_modules/mlxtend.evaluate/feature_importance_permutation/#feature_importance_permutation","text":"feature_importance_permutation(X, y, predict_method, metric, num_rounds=1, seed=None) Feature importance estimation via permutation importance Parameters X : NumPy array, shape = [n_samples, n_features] Dataset, where n_samples is the number of samples and n_features is the number of features. y : NumPy array, shape = [n_samples] Target values. predict_method : prediction function A callable function that predicts the target values from X. metric : str, callable The metric for evaluating the feature importance through permutation. By default, the string 'accuracy' is recommended for classifiers and the string 'r2' is recommended for regressors. Optionally, a custom scoring function (e.g., metric=scoring_func ) that accepts two arguments, y_true and y_pred, which have similar shape to the y array. num_rounds : int (default=1) Number of rounds the feature columns are permuted to compute the permutation importance. seed : int or None (default=None) Random seed for permuting the feature columns. Returns mean_importance_vals, all_importance_vals : NumPy arrays. The first array, mean_importance_vals, has shape [n_features, ] and contains the importance values for all features. The shape of the second array is [n_features, num_rounds] and contains the feature importance for each repetition. If num_rounds=1, it contains the same values as the first array, mean_importance_vals. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/feature_importance_permutation/","title":"feature_importance_permutation"},{"location":"api_modules/mlxtend.evaluate/ftest/","text":"ftest ftest(y_target, *y_model_predictions) F test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array. Returns f, p : float or None, float Returns the F-value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/ftest/","title":"Ftest"},{"location":"api_modules/mlxtend.evaluate/ftest/#ftest","text":"ftest(y_target, *y_model_predictions) F test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array.
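A minimal sketch of the variadic calling convention for ftest (cochrans_q, documented earlier, is called the same way); the toy label arrays are illustrative:

```python
import numpy as np
from mlxtend.evaluate import ftest

# Toy ground truth and predictions from three models
y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
y_mod1 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0])
y_mod2 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0])
y_mod3 = np.array([0, 1, 1, 1, 0, 1, 0, 0, 0, 0])

# Two or more prediction arrays are passed as variadic arguments
f, p = ftest(y_true, y_mod1, y_mod2, y_mod3)
print('F: %.3f, p: %.3f' % (f, p))
```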
Returns f, p : float or None, float Returns the F-value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/ftest/","title":"ftest"},{"location":"api_modules/mlxtend.evaluate/lift_score/","text":"lift_score lift_score(y_target, y_predicted, binary=True, positive_label=1) Lift measures the degree to which the predictions of a classification model are better than randomly-generated predictions. In terms of True Positives (TP), True Negatives (TN), False Positives (FP), and False Negatives (FN), the lift score is computed as: [ TP / (TP+FP) ] / [ (TP+FN) / (TP+TN+FP+FN) ] Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: True) Maps a multi-class problem onto a binary problem, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns score : float Lift score in the range [0, \\infty ] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/lift_score/","title":"Lift score"},{"location":"api_modules/mlxtend.evaluate/lift_score/#lift_score","text":"lift_score(y_target, y_predicted, binary=True, positive_label=1) Lift measures the degree to which the predictions of a classification model are better than randomly-generated predictions. In terms of True Positives (TP), True Negatives (TN), False Positives (FP), and False Negatives (FN), the lift score is computed as: [ TP / (TP+FP) ] / [ (TP+FN) / (TP+TN+FP+FN) ] Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: True) Maps a multi-class problem onto a binary problem, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns score : float Lift score in the range [0, \\infty ] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/lift_score/","title":"lift_score"},{"location":"api_modules/mlxtend.evaluate/mcnemar/","text":"mcnemar mcnemar(ary, corrected=True, exact=False) McNemar test for paired nominal data Parameters ary : array-like, shape=[2, 2] 2 x 2 contingency table (as returned by evaluate.mcnemar_table), where a: ary[0, 0]: # of samples that both models predicted correctly b: ary[0, 1]: # of samples that model 1 got right and model 2 got wrong c: ary[1, 0]: # of samples that model 2 got right and model 1 got wrong d: ary[1, 1]: # of samples that both models predicted incorrectly corrected : bool (default: True) Uses Edwards' continuity correction for chi-squared if True exact : bool (default: False) If True , uses an exact binomial test comparing b to a binomial distribution with n = b + c and p = 0.5. It is highly recommended to use exact=True for sample sizes < 25 since the test statistic is not well-approximated by the chi-squared distribution for small samples!
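A minimal lift_score sketch with toy labels (the data is illustrative, not from the original reference):

```python
import numpy as np
from mlxtend.evaluate import lift_score

y_target    = np.array([0, 0, 1, 0, 0, 1, 1, 1, 1, 1])
y_predicted = np.array([1, 0, 1, 0, 0, 0, 0, 1, 1, 1])

# lift = precision / prevalence of the positive class
# here: TP=4, FP=1, FN=2, TN=3 -> (4/5) / (6/10) = 1.333...
print(lift_score(y_target, y_predicted))
```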
Returns chi2, p : float or None, float Returns the chi-squared value and the p-value; if exact=True (default: False ), chi2 is None Examples For usage examples, please see [http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/](http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/)","title":"Mcnemar"},{"location":"api_modules/mlxtend.evaluate/mcnemar/#mcnemar","text":"mcnemar(ary, corrected=True, exact=False) McNemar test for paired nominal data Parameters ary : array-like, shape=[2, 2] 2 x 2 contingency table (as returned by evaluate.mcnemar_table), where a: ary[0, 0]: # of samples that both models predicted correctly b: ary[0, 1]: # of samples that model 1 got right and model 2 got wrong c: ary[1, 0]: # of samples that model 2 got right and model 1 got wrong d: ary[1, 1]: # of samples that both models predicted incorrectly corrected : bool (default: True) Uses Edwards' continuity correction for chi-squared if True exact : bool (default: False) If True , uses an exact binomial test comparing b to a binomial distribution with n = b + c and p = 0.5. It is highly recommended to use exact=True for sample sizes < 25 since the test statistic is not well-approximated by the chi-squared distribution for small samples! Returns chi2, p : float or None, float Returns the chi-squared value and the p-value; if exact=True (default: False ), chi2 is None Examples For usage examples, please see [http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/](http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/)","title":"mcnemar"},{"location":"api_modules/mlxtend.evaluate/mcnemar_table/","text":"mcnemar_table mcnemar_table(y_target, y_model1, y_model2) Compute a 2x2 contingency table for McNemar's test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. y_model1 : array-like, shape=[n_samples] Predicted class labels from model 1 as 1D NumPy array. y_model2 : array-like, shape=[n_samples] Predicted class labels from model 2 as 1D NumPy array. Returns tb : array-like, shape=[2, 2] 2x2 contingency table with the following contents: a: tb[0, 0]: # of samples that both models predicted correctly b: tb[0, 1]: # of samples that model 1 got right and model 2 got wrong c: tb[1, 0]: # of samples that model 2 got right and model 1 got wrong d: tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_table/","title":"Mcnemar table"},{"location":"api_modules/mlxtend.evaluate/mcnemar_table/#mcnemar_table","text":"mcnemar_table(y_target, y_model1, y_model2) Compute a 2x2 contingency table for McNemar's test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. y_model1 : array-like, shape=[n_samples] Predicted class labels from model 1 as 1D NumPy array. y_model2 : array-like, shape=[n_samples] Predicted class labels from model 2 as 1D NumPy array.
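A minimal sketch combining mcnemar_table and mcnemar as documented above (toy labels are illustrative):

```python
import numpy as np
from mlxtend.evaluate import mcnemar, mcnemar_table

y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
y_mod1 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0])
y_mod2 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0])

# Build the 2x2 contingency table, then run the test on it
tb = mcnemar_table(y_target=y_true, y_model1=y_mod1, y_model2=y_mod2)
chi2, p = mcnemar(ary=tb, corrected=True)
print(tb)
print('chi-squared:', chi2, 'p:', p)
# For small b + c, mcnemar(ary=tb, exact=True) is preferable; it returns chi2=None.
```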
Returns tb : array-like, shape=[2, 2] 2x2 contingency table with the following contents: a: tb[0, 0]: # of samples that both models predicted correctly b: tb[0, 1]: # of samples that model 1 got right and model 2 got wrong c: tb[1, 0]: # of samples that model 2 got right and model 1 got wrong d: tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_table/","title":"mcnemar_table"},{"location":"api_modules/mlxtend.evaluate/mcnemar_tables/","text":"mcnemar_tables mcnemar_tables(y_target, *y_model_predictions) Compute multiple 2x2 contingency tables for McNemar's test or Cochran's Q test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-like, shape=[n_samples] Predicted class labels for a model. Returns tables : dict Dictionary of NumPy arrays with shape=[2, 2]. Each dictionary key names the two models to be compared based on the order the models were passed as *y_model_predictions . The number of dictionary entries is equal to the number of pairwise combinations between the m models, i.e., \"m choose 2.\" For example, the following target array (containing the true labels) and 3 models y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 1, 1, 1, 0, 1, 0, 0, 0, 0]) would result in the following dictionary: {'model_0 vs model_1': array([[ 4., 1.], [ 2., 3.]]), 'model_0 vs model_2': array([[ 3., 0.], [ 3., 4.]]), 'model_1 vs model_2': array([[ 3., 0.], [ 2., 5.]])} Each array is structured in the following way: tb[0, 0]: # of samples that both models predicted correctly tb[0, 1]: # of samples that model a got right and model b got wrong tb[1, 0]: # of samples that model b got right and model a got wrong tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see [http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/](http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/)","title":"Mcnemar tables"},{"location":"api_modules/mlxtend.evaluate/mcnemar_tables/#mcnemar_tables","text":"mcnemar_tables(y_target, *y_model_predictions) Compute multiple 2x2 contingency tables for McNemar's test or Cochran's Q test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-like, shape=[n_samples] Predicted class labels for a model. Returns tables : dict Dictionary of NumPy arrays with shape=[2, 2]. Each dictionary key names the two models to be compared based on the order the models were passed as *y_model_predictions .
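A minimal mcnemar_tables sketch mirroring the toy example in the docstring above:

```python
import numpy as np
from mlxtend.evaluate import mcnemar_tables

y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0])
y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0])
y_mod2 = np.array([0, 1, 1, 1, 0, 1, 0, 0, 0, 0])

# Three models -> "3 choose 2" = 3 pairwise 2x2 tables
tables = mcnemar_tables(y_true, y_mod0, y_mod1, y_mod2)
for key, tb in tables.items():
    print(key)
    print(tb)
```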
The number of dictionary entries is equal to the number of pairwise combinations between the m models, i.e., \"m choose 2.\" For example, the following target array (containing the true labels) and 3 models y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 1, 1, 1, 0, 1, 0, 0, 0, 0]) would result in the following dictionary: {'model_0 vs model_1': array([[ 4., 1.], [ 2., 3.]]), 'model_0 vs model_2': array([[ 3., 0.], [ 3., 4.]]), 'model_1 vs model_2': array([[ 3., 0.], [ 2., 5.]])} Each array is structured in the following way: tb[0, 0]: # of samples that both models predicted correctly tb[0, 1]: # of samples that model a got right and model b got wrong tb[1, 0]: # of samples that model b got right and model a got wrong tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see [http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/](http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/)","title":"mcnemar_tables"},{"location":"api_modules/mlxtend.evaluate/paired_ttest_5x2cv/","text":"paired_ttest_5x2cv paired_ttest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv paired t test proposed by Dietterich (1998) to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_5x2cv/","title":"Paired ttest 5x2cv"},{"location":"api_modules/mlxtend.evaluate/paired_ttest_5x2cv/#paired_ttest_5x2cv","text":"paired_ttest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv paired t test proposed by Dietterich (1998) to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors.
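A minimal paired_ttest_5x2cv sketch; the iris data and estimator choices are illustrative assumptions:

```python
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from mlxtend.data import iris_data
from mlxtend.evaluate import paired_ttest_5x2cv

X, y = iris_data()

t, p = paired_ttest_5x2cv(estimator1=LogisticRegression(),
                          estimator2=DecisionTreeClassifier(random_state=1),
                          X=X, y=y,
                          random_seed=1)
print('t statistic: %.3f, p value: %.3f' % (t, p))
```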
If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_5x2cv/","title":"paired_ttest_5x2cv"},{"location":"api_modules/mlxtend.evaluate/paired_ttest_kfold_cv/","text":"paired_ttest_kfold_cv paired_ttest_kfold_cv(estimator1, estimator2, X, y, cv=10, scoring=None, shuffle=False, random_seed=None) Implements the k-fold paired t test procedure to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. cv : int (default: 10) Number of splits and iterations for the cross-validation procedure scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. shuffle : bool (default: False) Whether to shuffle the dataset for generating the k-fold splits. random_seed : int or None (default: None) Random seed for shuffling the dataset for generating the k-fold splits. Ignored if shuffle=False. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_kfold_cv/","title":"Paired ttest kfold cv"},{"location":"api_modules/mlxtend.evaluate/paired_ttest_kfold_cv/#paired_ttest_kfold_cv","text":"paired_ttest_kfold_cv(estimator1, estimator2, X, y, cv=10, scoring=None, shuffle=False, random_seed=None) Implements the k-fold paired t test procedure to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values.
cv : int (default: 10) Number of splits and iterations for the cross-validation procedure scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. shuffle : bool (default: False) Whether to shuffle the dataset for generating the k-fold splits. random_seed : int or None (default: None) Random seed for shuffling the dataset for generating the k-fold splits. Ignored if shuffle=False. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_kfold_cv/","title":"paired_ttest_kfold_cv"},{"location":"api_modules/mlxtend.evaluate/paired_ttest_resampled/","text":"paired_ttest_resampled paired_ttest_resampled(estimator1, estimator2, X, y, num_rounds=30, test_size=0.3, scoring=None, random_seed=None) Implements the resampled paired t test procedure to compare the performance of two models (also called k-hold-out paired t test). Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. num_rounds : int (default: 30) Number of resampling iterations (i.e., train/test splits) test_size : float or int (default: 0.3) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to use as a test set. If int, represents the absolute number of test examples. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models.
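A minimal paired_ttest_kfold_cv sketch under the same illustrative assumptions as above (iris data, two arbitrary classifiers); paired_ttest_resampled, documented next, is called analogously with num_rounds/test_size in place of cv/shuffle:

```python
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from mlxtend.data import iris_data
from mlxtend.evaluate import paired_ttest_kfold_cv

X, y = iris_data()

# 10-fold paired t test; shuffle the data before creating the folds
t, p = paired_ttest_kfold_cv(estimator1=LogisticRegression(),
                             estimator2=DecisionTreeClassifier(random_state=1),
                             X=X, y=y,
                             cv=10, shuffle=True, random_seed=1)
print('t statistic: %.3f, p value: %.3f' % (t, p))
```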
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_resampled/","title":"Paired ttest resampled"},{"location":"api_modules/mlxtend.evaluate/paired_ttest_resampled/#paired_ttest_resampled","text":"paired_ttest_resampled(estimator1, estimator2, X, y, num_rounds=30, test_size=0.3, scoring=None, random_seed=None) Implements the resampled paired t test procedure to compare the performance of two models (also called k-hold-out paired t test). Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. num_rounds : int (default: 30) Number of resampling iterations (i.e., train/test splits) test_size : float or int (default: 0.3) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to use as a test set. If int, represents the absolute number of test examples. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_resampled/","title":"paired_ttest_resampled"},{"location":"api_modules/mlxtend.evaluate/permutation_test/","text":"permutation_test permutation_test(x, y, func='x_mean != y_mean', method='exact', num_rounds=1000, seed=None) Nonparametric permutation test Parameters x : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the first sample (e.g., the treatment group). y : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the second sample (e.g., the control group). func : custom function or str (default: 'x_mean != y_mean') function to compute the statistic for the permutation test. - If 'x_mean != y_mean', uses func=lambda x, y: np.abs(np.mean(x) - np.mean(y)) for a two-sided test. - If 'x_mean > y_mean', uses func=lambda x, y: np.mean(x) - np.mean(y) for a one-sided test. - If 'x_mean < y_mean', uses func=lambda x, y: np.mean(y) - np.mean(x) for a one-sided test. method : 'approximate' or 'exact' (default: 'exact') If 'exact' (default), all possible permutations are considered. If 'approximate', the number of drawn samples is given by num_rounds . Note that 'exact' is typically not feasible unless the dataset size is relatively small. num_rounds : int (default: 1000) The number of permutation samples if method='approximate' .
seed : int or None (default: None) The random seed for generating permutation samples if method='approximate' . Returns p-value under the null hypothesis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/permutation_test/","title":"Permutation test"},{"location":"api_modules/mlxtend.evaluate/permutation_test/#permutation_test","text":"permutation_test(x, y, func='x_mean != y_mean', method='exact', num_rounds=1000, seed=None) Nonparametric permutation test Parameters x : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the first sample (e.g., the treatment group). y : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the second sample (e.g., the control group). func : custom function or str (default: 'x_mean != y_mean') function to compute the statistic for the permutation test. - If 'x_mean != y_mean', uses func=lambda x, y: np.abs(np.mean(x) - np.mean(y)) for a two-sided test. - If 'x_mean > y_mean', uses func=lambda x, y: np.mean(x) - np.mean(y) for a one-sided test. - If 'x_mean < y_mean', uses func=lambda x, y: np.mean(y) - np.mean(x) for a one-sided test. method : 'approximate' or 'exact' (default: 'exact') If 'exact' (default), all possible permutations are considered. If 'approximate', the number of drawn samples is given by num_rounds . Note that 'exact' is typically not feasible unless the dataset size is relatively small. num_rounds : int (default: 1000) The number of permutation samples if method='approximate' . seed : int or None (default: None) The random seed for generating permutation samples if method='approximate' . Returns p-value under the null hypothesis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/permutation_test/","title":"permutation_test"},{"location":"api_modules/mlxtend.evaluate/proportion_difference/","text":"proportion_difference proportion_difference(proportion_1, proportion_2, n_1, n_2=None) Computes the test statistic and p-value for a difference of proportions test. Parameters proportion_1 : float The first proportion proportion_2 : float The second proportion n_1 : int The sample size of the first test sample n_2 : int or None (default=None) The sample size of the second test sample. If None , n_1 = n_2 . Returns z, p : float or None, float Returns the z-score and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/proportion_difference/","title":"Proportion difference"},{"location":"api_modules/mlxtend.evaluate/proportion_difference/#proportion_difference","text":"proportion_difference(proportion_1, proportion_2, n_1, n_2=None) Computes the test statistic and p-value for a difference of proportions test. Parameters proportion_1 : float The first proportion proportion_2 : float The second proportion n_1 : int The sample size of the first test sample n_2 : int or None (default=None) The sample size of the second test sample. If None , n_1 = n_2 . Returns z, p : float or None, float Returns the z-score and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/proportion_difference/","title":"proportion_difference"},{"location":"api_modules/mlxtend.evaluate/scoring/","text":"scoring scoring(y_target, y_predicted, metric='error', positive_label=1, unique_labels='auto') Compute a scoring metric for supervised learning. Parameters y_target : array-like, shape=[n_values] True class labels or target values.
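A minimal permutation_test sketch; the two toy samples are illustrative numbers, not data from the original reference:

```python
from mlxtend.evaluate import permutation_test

# Toy two-sample data (illustrative values)
treatment = [28.4, 29.3, 31.2, 29.6, 30.3, 28.8, 29.2]
control   = [33.5, 30.6, 32.4, 32.5, 29.4, 30.9, 31.8]

# 14 data points -> "14 choose 7" = 3432 partitions, so 'exact' is feasible
p_value = permutation_test(treatment, control,
                           func='x_mean != y_mean',
                           method='exact')
print('p value: %.4f' % p_value)
```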
y_predicted : array-like, shape=[n_values] Predicted class labels or target values. metric : str (default: 'error') Performance metric: 'accuracy': (TP + TN)/(FP + FN + TP + TN) = 1-ERR 'per-class accuracy': Average per-class accuracy 'per-class error': Average per-class error 'error': (FP + FN)/(FP + FN + TP + TN) = 1-ACC 'false_positive_rate': FP/N = FP/(FP + TN) 'true_positive_rate': TP/P = TP/(FN + TP) 'true_negative_rate': TN/N = TN/(FP + TN) 'precision': TP/(TP + FP) 'recall': equal to 'true_positive_rate' 'sensitivity': equal to 'true_positive_rate' or 'recall' 'specificity': equal to 'true_negative_rate' 'f1': 2 * (PRE * REC)/(PRE + REC) 'matthews_corr_coef': (TP*TN - FP*FN) / sqrt((TP + FP)(TP + FN)(TN + FP)(TN + FN)) Where: [TP: True positives, TN: True negatives, FP: False positives, FN: False negatives] positive_label : int (default: 1) Label of the positive class for binary classification metrics. unique_labels : str or array-like (default: 'auto') If 'auto', deduces the unique class labels from y_target Returns score : float Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/scoring/","title":"Scoring"},{"location":"api_modules/mlxtend.evaluate/scoring/#scoring","text":"scoring(y_target, y_predicted, metric='error', positive_label=1, unique_labels='auto') Compute a scoring metric for supervised learning. Parameters y_target : array-like, shape=[n_values] True class labels or target values. y_predicted : array-like, shape=[n_values] Predicted class labels or target values. metric : str (default: 'error') Performance metric: 'accuracy': (TP + TN)/(FP + FN + TP + TN) = 1-ERR 'per-class accuracy': Average per-class accuracy 'per-class error': Average per-class error 'error': (FP + FN)/(FP + FN + TP + TN) = 1-ACC 'false_positive_rate': FP/N = FP/(FP + TN) 'true_positive_rate': TP/P = TP/(FN + TP) 'true_negative_rate': TN/N = TN/(FP + TN) 'precision': TP/(TP + FP) 'recall': equal to 'true_positive_rate' 'sensitivity': equal to 'true_positive_rate' or 'recall' 'specificity': equal to 'true_negative_rate' 'f1': 2 * (PRE * REC)/(PRE + REC) 'matthews_corr_coef': (TP*TN - FP*FN) / sqrt((TP + FP)(TP + FN)(TN + FP)(TN + FN)) Where: [TP: True positives, TN: True negatives, FP: False positives, FN: False negatives] positive_label : int (default: 1) Label of the positive class for binary classification metrics. unique_labels : str or array-like (default: 'auto') If 'auto', deduces the unique class labels from y_target Returns score : float Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/scoring/","title":"scoring"},{"location":"api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/","text":"LinearDiscriminantAnalysis LinearDiscriminantAnalysis(n_discriminants=None) Linear Discriminant Analysis Class Parameters n_discriminants : int (default: None) The number of discriminants for transformation. Keeps the original dimensions of the dataset if None . Attributes w_ : array-like, shape=[n_features, n_discriminants] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/LinearDiscriminantAnalysis/ Methods fit(X, y, n_classes=None) Fit the LDA model with X.
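A minimal scoring sketch with toy labels (illustrative data):

```python
import numpy as np
from mlxtend.evaluate import scoring

y_target    = np.array([1, 1, 1, 0, 0, 0, 0, 1])
y_predicted = np.array([1, 0, 1, 0, 0, 1, 1, 1])

print(scoring(y_target, y_predicted, metric='accuracy'))  # 5/8 = 0.625
print(scoring(y_target, y_predicted, metric='recall',
              positive_label=1))                          # 3/4 = 0.75
```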
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_discriminants] Projected training vectors.","title":"LinearDiscriminantAnalysis"},{"location":"api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/#lineardiscriminantanalysis","text":"LinearDiscriminantAnalysis(n_discriminants=None) Linear Discriminant Analysis Class Parameters n_discriminants : int (default: None) The number of discriminants for transformation. Keeps the original dimensions of the dataset if None . Attributes w_ : array-like, shape=[n_features, n_discriminants] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/LinearDiscriminantAnalysis/","title":"LinearDiscriminantAnalysis"},{"location":"api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/#methods","text":"fit(X, y, n_classes=None) Fit the LDA model with X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.
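A minimal fit/transform sketch for LinearDiscriminantAnalysis; standardizing the features first is an illustrative assumption (common practice, not mandated by the reference above):

```python
from mlxtend.data import iris_data
from mlxtend.feature_extraction import LinearDiscriminantAnalysis
from mlxtend.preprocessing import standardize

X, y = iris_data()
X_std = standardize(X)  # assumption: features standardized before LDA

lda = LinearDiscriminantAnalysis(n_discriminants=2)
lda.fit(X_std, y)
X_lda = lda.transform(X_std)
print(X_lda.shape)  # (150, 2)
```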
adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/#license-bsd-3-clause","text":"set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.feature_extraction/LinearDiscriminantAnalysis/#license-bsd-3-clause_1","text":"transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_discriminants] Projected training vectors.","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.feature_extraction/PrincipalComponentAnalysis/","text":"PrincipalComponentAnalysis PrincipalComponentAnalysis(n_components=None, solver='eigen') Principal Component Analysis Class Parameters n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . solver : str (default: 'eigen') Method for performing the matrix decomposition. {'eigen', 'svd'} Attributes w_ : array-like, shape=[n_features, n_components] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. loadings_ : array_like, shape=[n_features, n_features] The factor loadings of the original variables onto the principal components. The columns are the principal components, and the rows are the feature loadings. For instance, the first column contains the loadings onto the first principal component. Note that the signs may be flipped depending on whether you use the 'eigen' or 'svd' solver; this does not affect the interpretation of the loadings though. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/PrincipalComponentAnalysis/ Methods fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators.
Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"PrincipalComponentAnalysis"},{"location":"api_modules/mlxtend.feature_extraction/PrincipalComponentAnalysis/#principalcomponentanalysis","text":"PrincipalComponentAnalysis(n_components=None, solver='eigen') Principal Component Analysis Class Parameters n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . solver : str (default: 'eigen') Method for performing the matrix decomposition. {'eigen', 'svd'} Attributes w_ : array-like, shape=[n_features, n_components] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. loadings_ : array_like, shape=[n_features, n_features] The factor loadings of the original variables onto the principal components. The columns are the principal components, and the rows are the feature loadings. For instance, the first column contains the loadings onto the first principal component. Note that the signs may be flipped depending on whether you use the 'eigen' or 'svd' solver; this does not affect the interpretation of the loadings though. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/PrincipalComponentAnalysis/","title":"PrincipalComponentAnalysis"},{"location":"api_modules/mlxtend.feature_extraction/PrincipalComponentAnalysis/#methods","text":"fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.
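A minimal PrincipalComponentAnalysis sketch (the iris data is an illustrative assumption):

```python
from mlxtend.data import iris_data
from mlxtend.feature_extraction import PrincipalComponentAnalysis

X, y = iris_data()

pca = PrincipalComponentAnalysis(n_components=2, solver='eigen')
pca.fit(X)
X_pca = pca.transform(X)
print(X_pca.shape)          # (150, 2)
print(pca.loadings_[:, 0])  # loadings of the original features on PC 1
```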
adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_modules/mlxtend.feature_extraction/PrincipalComponentAnalysis/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.feature_extraction/PrincipalComponentAnalysis/#license-bsd-3-clause","text":"set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.feature_extraction/PrincipalComponentAnalysis/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.feature_extraction/PrincipalComponentAnalysis/#license-bsd-3-clause_1","text":"transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.feature_extraction/RBFKernelPCA/","text":"RBFKernelPCA RBFKernelPCA(gamma=15.0, n_components=None, copy_X=True) RBF Kernel Principal Component Analysis for dimensionality reduction. Parameters gamma : float (default: 15.0) Free parameter (coefficient) of the RBF kernel. n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . copy_X : bool (default: True) Copies training data, which is required to compute the projection of new data via the transform method. Uses a reference to X if False. Attributes e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. X_projected_ : array-like, shape=[n_samples, n_components] Training samples projected along the component axes. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/RBFKernelPCA/ Methods fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause set_params(**params) Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause transform(X) Apply the non-linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"RBFKernelPCA"},{"location":"api_modules/mlxtend.feature_extraction/RBFKernelPCA/#rbfkernelpca","text":"RBFKernelPCA(gamma=15.0, n_components=None, copy_X=True) RBF Kernel Principal Component Analysis for dimensionality reduction. Parameters gamma : float (default: 15.0) Free parameter (coefficient) of the RBF kernel. n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . copy_X : bool (default: True) Copies training data, which is required to compute the projection of new data via the transform method. Uses a reference to X if False. Attributes e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. X_projected_ : array-like, shape=[n_samples, n_components] Training samples projected along the component axes. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/RBFKernelPCA/","title":"RBFKernelPCA"},{"location":"api_modules/mlxtend.feature_extraction/RBFKernelPCA/#methods","text":"fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_modules/mlxtend.feature_extraction/RBFKernelPCA/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.feature_extraction/RBFKernelPCA/#license-bsd-3-clause","text":"set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.
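A minimal RBFKernelPCA sketch (iris data is an illustrative assumption):

```python
from mlxtend.data import iris_data
from mlxtend.feature_extraction import RBFKernelPCA

X, y = iris_data()

kpca = RBFKernelPCA(gamma=15.0, n_components=2, copy_X=True)
kpca.fit(X)
print(kpca.X_projected_.shape)  # (150, 2): projected training samples

# copy_X=True is what makes projecting new, unseen points possible:
print(kpca.transform(X[:5]).shape)  # (5, 2)
```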
Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.feature_extraction/RBFKernelPCA/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_modules/mlxtend.feature_extraction/RBFKernelPCA/#license-bsd-3-clause_1","text":"transform(X) Apply the non-linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"License: BSD 3 clause"},{"location":"api_modules/mlxtend.feature_selection/ColumnSelector/","text":"ColumnSelector ColumnSelector(cols=None, drop_axis=False) Object for selecting specific columns from a data set. Parameters cols : array-like (default: None) A list specifying the feature indices to be selected. For example, [1, 4, 5] to select the 2nd, 5th, and 6th feature columns. If None, returns all columns in the array. drop_axis : bool (default=False) Drops the last axis if True and only one column is selected. This is useful, e.g., when the ColumnSelector is used for selecting only one column and the resulting array should be fed to e.g., a scikit-learn column selector. E.g., instead of returning an array with shape (n_samples, 1), drop_axis=True will return an array with shape (n_samples,). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ColumnSelector/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features.
y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features","title":"ColumnSelector"},{"location":"api_modules/mlxtend.feature_selection/ColumnSelector/#columnselector","text":"ColumnSelector(cols=None, drop_axis=False) Object for selecting specific columns from a data set. Parameters cols : array-like (default: None) A list specifying the feature indices to be selected. For example, [1, 4, 5] to select the 2nd, 5th, and 6th feature columns. If None, returns all columns in the array. drop_axis : bool (default=False) Drops the last axis if True and only one column is selected. This is useful, e.g., when the ColumnSelector is used for selecting only one column and the resulting array should be fed to e.g., a scikit-learn column selector. E.g., instead of returning an array with shape (n_samples, 1), drop_axis=True will return an array with shape (n_samples,). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ColumnSelector/","title":"ColumnSelector"},{"location":"api_modules/mlxtend.feature_selection/ColumnSelector/#methods","text":"fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features","title":"Methods"},{"location":"api_modules/mlxtend.feature_selection/ExhaustiveFeatureSelector/","text":"ExhaustiveFeatureSelector ExhaustiveFeatureSelector(estimator, min_features=1, max_features=1, print_progress=True, scoring='accuracy', cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Exhaustive Feature Selection for Classification and Regression. (new in v0.4.3) Parameters estimator : scikit-learn classifier or regressor min_features : int (default: 1) Minimum number of features to select max_features : int (default: 1) Maximum number of features to select print_progress : bool (default: True) Prints progress as the number of evaluated feature subsets to stderr.
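A minimal ColumnSelector sketch illustrating the cols and drop_axis parameters documented just above (the toy array is an illustrative assumption):

```python
import numpy as np
from mlxtend.feature_selection import ColumnSelector

X = np.array([[1., 10., 100.],
              [2., 20., 200.],
              [3., 30., 300.]])

# Select the 1st and 3rd feature columns
print(ColumnSelector(cols=(0, 2)).fit_transform(X))  # shape (3, 2)

# With a single column, drop_axis=True squeezes (n_samples, 1) to (n_samples,)
print(ColumnSelector(cols=(1,), drop_axis=True).fit_transform(X).shape)
```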
scoring : str, (default='accuracy') Scoring metric in {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} for regressors, or a callable object or function with signature scorer(estimator, X, y) . cv : int (default: 5) Scikit-learn cross-validation generator or int . If estimator is a classifier (or y consists of integer class labels), stratified k-fold is performed, and regular k-fold cross-validation otherwise. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0 and n_jobs=1. Attributes best_idx_ : array-like, shape = [n_predictions] Feature indices of the selected feature subsets. best_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. New in v 0.13.0. best_score_ : float Cross validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the exhaustive selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. The 'feature_names' is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ExhaustiveFeatureSelector/ Methods fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier.
Returns self : object fit_transform(X, y, **fit_params) Fit to training data and return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns Feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary. Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': average CV score 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Feature subset of X, shape={n_samples, k_features}","title":"ExhaustiveFeatureSelector"},{"location":"api_modules/mlxtend.feature_selection/ExhaustiveFeatureSelector/#exhaustivefeatureselector","text":"ExhaustiveFeatureSelector(estimator, min_features=1, max_features=1, print_progress=True, scoring='accuracy', cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Exhaustive Feature Selection for Classification and Regression. (new in v0.4.3) Parameters estimator : scikit-learn classifier or regressor min_features : int (default: 1) Minimum number of features to select max_features : int (default: 1) Maximum number of features to select print_progress : bool (default: True) Prints progress (the number of evaluated feature subsets) to stderr. scoring : str, (default='accuracy') Scoring metric in {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} for regressors, or a callable object or function with signature scorer(estimator, X, y) . cv : int (default: 5) Scikit-learn cross-validation generator or int . If estimator is a classifier (or y consists of integer class labels), stratified k-fold is performed, and regular k-fold cross-validation otherwise. No cross-validation if cv is None, False, or 0.
n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1. Attributes best_idx_ : array-like, shape = [n_predictions] Feature indices of the selected feature subsets. best_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. New in v 0.13.0. best_score_ : float Cross-validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the exhaustive selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. The 'feature_names' key is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ExhaustiveFeatureSelector/","title":"ExhaustiveFeatureSelector"},{"location":"api_modules/mlxtend.feature_selection/ExhaustiveFeatureSelector/#methods","text":"fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data and return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values.
fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns Feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary. Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': average CV score 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Feature subset of X, shape={n_samples, k_features}","title":"Methods"},{"location":"api_modules/mlxtend.feature_selection/SequentialFeatureSelector/","text":"SequentialFeatureSelector SequentialFeatureSelector(estimator, k_features=1, forward=True, floating=False, verbose=0, scoring=None, cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Sequential Feature Selection for Classification and Regression. Parameters estimator : scikit-learn classifier or regressor k_features : int or tuple or str (default: 1) Number of features to select, where k_features < the full feature set. New in 0.4.2: A tuple containing a min and max value can be provided, and the SFS will return any feature combination between min and max that scored highest in cross-validation. For example, the tuple (1, 4) will return any combination from 1 up to 4 features instead of a fixed number of features k. New in 0.8.0: A string argument \"best\" or \"parsimonious\". If \"best\" is provided, the feature selector will return the feature subset with the best cross-validation performance. If \"parsimonious\" is provided as an argument, the smallest feature subset that is within one standard error of the cross-validation performance will be selected. forward : bool (default: True) Forward selection if True, backward selection otherwise floating : bool (default: False) Adds a conditional exclusion/inclusion if True. verbose : int (default: 0), level of verbosity to use in logging. If 0, no output, if 1 the number of features in the current set, if 2 detailed logging including timestamp and cv scores at step. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors.
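To make the ExhaustiveFeatureSelector reference above concrete, a minimal sketch (classifier and data are illustrative; assumes scikit-learn is installed):

```python
# Exhaustively score every feature subset of size 1-4 on Iris.
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS

X, y = load_iris(return_X_y=True)

efs = EFS(KNeighborsClassifier(n_neighbors=3),
          min_features=1, max_features=4,
          scoring='accuracy', cv=5)
efs = efs.fit(X, y)
print('Best subset (indices):', efs.best_idx_)
print('Best CV score: %.3f' % efs.best_score_)
```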
If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. cv : int (default: 5) Integer or iterable yielding train, test splits. If cv is an integer and estimator is a classifier (or y consists of integer class labels), stratified k-fold is performed. Otherwise regular k-fold cross-validation is performed. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1. Attributes k_feature_idx_ : array-like, shape = [n_predictions] Feature indices of the selected feature subsets. k_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. New in v 0.13.0. k_score_ : float Cross-validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the sequential selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. The 'feature_names' key is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/SequentialFeatureSelector/ Methods fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values.
New in v 0.13.0: pandas DataFrames are now also accepted as argument for y. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data then reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: a pandas Series is now also accepted as argument for y. fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns Reduced feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary. Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': average CV score 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Reduced feature subset of X, shape={n_samples, k_features}","title":"SequentialFeatureSelector"},{"location":"api_modules/mlxtend.feature_selection/SequentialFeatureSelector/#sequentialfeatureselector","text":"SequentialFeatureSelector(estimator, k_features=1, forward=True, floating=False, verbose=0, scoring=None, cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Sequential Feature Selection for Classification and Regression. Parameters estimator : scikit-learn classifier or regressor k_features : int or tuple or str (default: 1) Number of features to select, where k_features < the full feature set. New in 0.4.2: A tuple containing a min and max value can be provided, and the SFS will return any feature combination between min and max that scored highest in cross-validation. For example, the tuple (1, 4) will return any combination from 1 up to 4 features instead of a fixed number of features k.
New in 0.8.0: A string argument \"best\" or \"parsimonious\". If \"best\" is provided, the feature selector will return the feature subset with the best cross-validation performance. If \"parsimonious\" is provided as an argument, the smallest feature subset that is within one standard error of the cross-validation performance will be selected. forward : bool (default: True) Forward selection if True, backward selection otherwise floating : bool (default: False) Adds a conditional exclusion/inclusion if True. verbose : int (default: 0), level of verbosity to use in logging. If 0, no output, if 1 the number of features in the current set, if 2 detailed logging including timestamp and cv scores at step. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. cv : int (default: 5) Integer or iterable yielding train, test splits. If cv is an integer and estimator is a classifier (or y consists of integer class labels), stratified k-fold is performed. Otherwise regular k-fold cross-validation is performed. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1. Attributes k_feature_idx_ : array-like, shape = [n_predictions] Feature indices of the selected feature subsets. k_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. New in v 0.13.0. k_score_ : float Cross-validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the sequential selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat.
subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. The 'feature_names' key is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/SequentialFeatureSelector/","title":"SequentialFeatureSelector"},{"location":"api_modules/mlxtend.feature_selection/SequentialFeatureSelector/#methods","text":"fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: pandas DataFrames are now also accepted as argument for y. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data then reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: a pandas Series is now also accepted as argument for y. fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns Reduced feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary. Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': average CV score 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features.
New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Reduced feature subset of X, shape={n_samples, k_features}","title":"Methods"},{"location":"api_modules/mlxtend.file_io/find_filegroups/","text":"find_filegroups find_filegroups(paths, substring='', extensions=None, validity_check=True, ignore_invisible=True, rstrip='', ignore_substring=None) Find and collect files from different directories in a python dictionary. Parameters paths : list Paths of the directories to be searched. Dictionary keys are built from the first directory. substring : str (default: '') Substring that all files have to contain to be considered. extensions : list (default: None) None or list of allowed file extensions for each path. If provided, the number of extensions must match the number of paths . validity_check : bool (default: True) If True , checks if all dictionary values have the same number of file paths. Prints a warning and returns an empty dictionary if the validity check failed. ignore_invisible : bool (default: True) If True , ignores invisible files (i.e., files starting with a period). rstrip : str (default: '') If provided, strips characters from the right side of the file base names after splitting the extension. Useful to trim different filenames to a common stem. E.g., \"abc_d.txt\" and \"abc_d_.csv\" would share the stem \"abc_d\" if rstrip is set to \"_\". ignore_substring : str (default: None) Ignores files that contain the specified substring. Returns groups : dict Dictionary of file paths. Keys are the file names found in the first directory listed in paths (without file extension). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_filegroups/","title":"Find filegroups"},{"location":"api_modules/mlxtend.file_io/find_filegroups/#find_filegroups","text":"find_filegroups(paths, substring='', extensions=None, validity_check=True, ignore_invisible=True, rstrip='', ignore_substring=None) Find and collect files from different directories in a python dictionary. Parameters paths : list Paths of the directories to be searched. Dictionary keys are built from the first directory. substring : str (default: '') Substring that all files have to contain to be considered. extensions : list (default: None) None or list of allowed file extensions for each path. If provided, the number of extensions must match the number of paths . validity_check : bool (default: True) If True , checks if all dictionary values have the same number of file paths. Prints a warning and returns an empty dictionary if the validity check failed. ignore_invisible : bool (default: True) If True , ignores invisible files (i.e., files starting with a period). rstrip : str (default: '') If provided, strips characters from the right side of the file base names after splitting the extension. Useful to trim different filenames to a common stem. E.g., \"abc_d.txt\" and \"abc_d_.csv\" would share the stem \"abc_d\" if rstrip is set to \"_\". ignore_substring : str (default: None) Ignores files that contain the specified substring. Returns groups : dict Dictionary of file paths. Keys are the file names found in the first directory listed in paths (without file extension).
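To round out the SequentialFeatureSelector reference above, a minimal forward-selection sketch (classifier and data are illustrative; assumes scikit-learn is installed):

```python
# Forward selection of 3 features with SequentialFeatureSelector.
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from mlxtend.feature_selection import SequentialFeatureSelector as SFS

X, y = load_iris(return_X_y=True)

sfs = SFS(KNeighborsClassifier(n_neighbors=3),
          k_features=3, forward=True, floating=False,
          scoring='accuracy', cv=5)
sfs = sfs.fit(X, y)
print('Selected indices:', sfs.k_feature_idx_)
print('CV score: %.3f' % sfs.k_score_)
```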
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_filegroups/","title":"find_filegroups"},{"location":"api_modules/mlxtend.file_io/find_files/","text":"find_files find_files(substring, path, recursive=False, check_ext=None, ignore_invisible=True, ignore_substring=None) Find files in a directory based on substring matching. Parameters substring : str Substring of the file to be matched. path : str Path where to look. recursive : bool If True, searches subdirectories recursively. check_ext : str If string (e.g., '.txt'), only returns files that match the specified file extension. ignore_invisible : bool If True , ignores invisible files (i.e., files starting with a period). ignore_substring : str Ignores files that contain the specified substring. Returns results : list List of the matched files. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_files/","title":"Find files"},{"location":"api_modules/mlxtend.file_io/find_files/#find_files","text":"find_files(substring, path, recursive=False, check_ext=None, ignore_invisible=True, ignore_substring=None) Find files in a directory based on substring matching. Parameters substring : str Substring of the file to be matched. path : str Path where to look. recursive : bool If True, searches subdirectories recursively. check_ext : str If string (e.g., '.txt'), only returns files that match the specified file extension. ignore_invisible : bool If True , ignores invisible files (i.e., files starting with a period). ignore_substring : str Ignores files that contain the specified substring. Returns results : list List of the matched files. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_files/","title":"find_files"},{"location":"api_modules/mlxtend.frequent_patterns/apriori/","text":"apriori apriori(df, min_support=0.5, use_colnames=False, max_len=None, n_jobs=1) Get frequent itemsets from a one-hot DataFrame Parameters df : pandas DataFrame or pandas SparseDataFrame pandas DataFrame in the one-hot encoded format. The allowed values are either 0/1 or True/False. For example, Apple Bananas Beer Chicken Milk Rice 0 1 0 1 1 0 1 1 1 0 1 0 0 1 2 1 0 1 0 0 0 3 1 1 0 0 0 0 4 0 0 1 1 1 1 5 0 0 1 0 1 1 6 0 0 1 0 1 0 7 1 1 0 0 0 0 min_support : float (default: 0.5) A float between 0 and 1 for minimum support of the itemsets returned. The support is computed as the fraction transactions_where_item(s)_occur / total_transactions. use_colnames : bool (default: False) If True, uses the DataFrame's column names in the returned DataFrame instead of column indices. max_len : int (default: None) Maximum length of the itemsets generated. If None (default) all possible itemset lengths (under the apriori condition) are evaluated. Returns pandas DataFrame with columns ['support', 'itemsets'] of all itemsets with support >= min_support and length <= max_len (if max_len is not None). Each itemset in the 'itemsets' column is of type frozenset , which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset).
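A small hypothetical sketch of find_filegroups as documented above (the directory names and file pairs are made up for illustration):

```python
# Group files that share a common stem across two directories.
from mlxtend.file_io import find_filegroups

# 'input/images' and 'input/labels' are hypothetical directories
# containing, e.g., sample1.png / sample1.txt pairs.
groups = find_filegroups(paths=['input/images', 'input/labels'],
                         extensions=['.png', '.txt'])
print(groups)  # {'sample1': ['input/images/sample1.png',
               #              'input/labels/sample1.txt'], ...}
```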
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/apriori/","title":"Apriori"},{"location":"api_modules/mlxtend.frequent_patterns/apriori/#apriori","text":"apriori(df, min_support=0.5, use_colnames=False, max_len=None, n_jobs=1) Get frequent itemsets from a one-hot DataFrame Parameters df : pandas DataFrame or pandas SparseDataFrame pandas DataFrame in the one-hot encoded format. The allowed values are either 0/1 or True/False. For example, Apple Bananas Beer Chicken Milk Rice 0 1 0 1 1 0 1 1 1 0 1 0 0 1 2 1 0 1 0 0 0 3 1 1 0 0 0 0 4 0 0 1 1 1 1 5 0 0 1 0 1 1 6 0 0 1 0 1 0 7 1 1 0 0 0 0 min_support : float (default: 0.5) A float between 0 and 1 for minimum support of the itemsets returned. The support is computed as the fraction transactions_where_item(s)_occur / total_transactions. use_colnames : bool (default: False) If True, uses the DataFrame's column names in the returned DataFrame instead of column indices. max_len : int (default: None) Maximum length of the itemsets generated. If None (default) all possible itemset lengths (under the apriori condition) are evaluated. Returns pandas DataFrame with columns ['support', 'itemsets'] of all itemsets with support >= min_support and length <= max_len (if max_len is not None). Each itemset in the 'itemsets' column is of type frozenset , which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/apriori/","title":"apriori"},{"location":"api_modules/mlxtend.frequent_patterns/association_rules/","text":"association_rules association_rules(df, metric='confidence', min_threshold=0.8, support_only=False) Generates a DataFrame of association rules including the metrics 'support', 'confidence', and 'lift' Parameters df : pandas DataFrame pandas DataFrame of frequent itemsets with columns ['support', 'itemsets'] metric : string (default: 'confidence') Metric to evaluate if a rule is of interest. Automatically set to 'support' if support_only=True . Otherwise, supported metrics are 'support', 'confidence', 'lift', 'leverage', and 'conviction'. These metrics are computed as follows: - support(A->C) = support(A+C) [aka 'support'], range: [0, 1] - confidence(A->C) = support(A+C) / support(A), range: [0, 1] - lift(A->C) = confidence(A->C) / support(C), range: [0, inf] - leverage(A->C) = support(A->C) - support(A)*support(C), range: [-1, 1] - conviction = [1 - support(C)] / [1 - confidence(A->C)], range: [0, inf] min_threshold : float (default: 0.8) Minimal threshold for the evaluation metric, via the metric parameter, to decide whether a candidate rule is of interest. support_only : bool (default: False) Only computes the rule support and fills the other metric columns with NaNs. This is useful if: a) the input DataFrame is incomplete, e.g., does not contain support values for all rule antecedents and consequents b) you simply want to speed up the computation because you don't need the other metrics. Returns pandas DataFrame with columns \"antecedents\" and \"consequents\" that store itemsets, plus the scoring metric columns: \"antecedent support\", \"consequent support\", \"support\", \"confidence\", \"lift\", \"leverage\", \"conviction\" of all rules for which metric(rule) >= min_threshold.
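A minimal apriori sketch to complement the reference above (the transactions are made up; TransactionEncoder is used to build the required one-hot DataFrame):

```python
# One-hot encode raw transactions, then mine frequent itemsets.
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori

transactions = [['Milk', 'Beer', 'Rice'],
                ['Milk', 'Beer'],
                ['Milk', 'Rice'],
                ['Beer', 'Rice']]

te = TransactionEncoder()
onehot = te.fit(transactions).transform(transactions)
df = pd.DataFrame(onehot, columns=te.columns_)

print(apriori(df, min_support=0.5, use_colnames=True))
```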
Each entry in the \"antecedents\" and \"consequents\" columns are of type frozenset , which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/association_rules/","title":"Association rules"},{"location":"api_modules/mlxtend.frequent_patterns/association_rules/#association_rules","text":"association_rules(df, metric='confidence', min_threshold=0.8, support_only=False) Generates a DataFrame of association rules including the metrics 'score', 'confidence', and 'lift' Parameters df : pandas DataFrame pandas DataFrame of frequent itemsets with columns ['support', 'itemsets'] metric : string (default: 'confidence') Metric to evaluate if a rule is of interest. Automatically set to 'support' if support_only=True . Otherwise, supported metrics are 'support', 'confidence', 'lift', 'leverage', and 'conviction' These metrics are computed as follows: - support(A->C) = support(A+C) [aka 'support'], range: [0, 1] - confidence(A->C) = support(A+C) / support(A), range: [0, 1] - lift(A->C) = confidence(A->C) / support(C), range: [0, inf] - leverage(A->C) = support(A->C) - support(A)*support(C), range: [-1, 1] - conviction = [1 - support(C)] / [1 - confidence(A->C)], range: [0, inf] min_threshold : float (default: 0.8) Minimal threshold for the evaluation metric, via the metric parameter, to decide whether a candidate rule is of interest. support_only : bool (default: False) Only computes the rule support and fills the other metric columns with NaNs. This is useful if: a) the input DataFrame is incomplete, e.g., does not contain support values for all rule antecedents and consequents b) you simply want to speed up the computation because you don't need the other metrics. Returns pandas DataFrame with columns \"antecedents\" and \"consequents\" that store itemsets, plus the scoring metric columns: \"antecedent support\", \"consequent support\", \"support\", \"confidence\", \"lift\", \"leverage\", \"conviction\" of all rules for which metric(rule) >= min_threshold. Each entry in the \"antecedents\" and \"consequents\" columns are of type frozenset , which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/association_rules/","title":"association_rules"},{"location":"api_modules/mlxtend.image/extract_face_landmarks/","text":"extract_face_landmarks extract_face_landmarks(img, return_dtype= ) Function to extract face landmarks. Note that this function requires an installation of the Python version of the library \"dlib\": http://dlib.net Parameters img : array, shape = [h, w, ?] numpy array of a face image. Supported shapes are - 3D tensors with 1 or more color channels, for example, RGB: [h, w, 3] - 2D tensors without color channel, for example, Grayscale: [h, w] return_dtype: the return data-type of the array, default: np.int32. Returns landmarks : numpy.ndarray, shape = [68, 2] A numpy array, where each row contains a landmark/point x-y coordinates. 
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/sources/image/extract_face_landmarks.ipynb","title":"Extract face landmarks"},{"location":"api_modules/mlxtend.image/extract_face_landmarks/#extract_face_landmarks","text":"extract_face_landmarks(img, return_dtype=np.int32) Function to extract face landmarks. Note that this function requires an installation of the Python version of the library \"dlib\": http://dlib.net Parameters img : array, shape = [h, w, ?] numpy array of a face image. Supported shapes are - 3D tensors with 1 or more color channels, for example, RGB: [h, w, 3] - 2D tensors without color channel, for example, Grayscale: [h, w] return_dtype : the return data-type of the array, default: np.int32. Returns landmarks : numpy.ndarray, shape = [68, 2] A numpy array, where each row contains the x-y coordinates of a landmark/point. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/sources/image/extract_face_landmarks.ipynb","title":"extract_face_landmarks"},{"location":"api_modules/mlxtend.math/factorial/","text":"factorial factorial(n) None","title":"Factorial"},{"location":"api_modules/mlxtend.math/factorial/#factorial","text":"factorial(n) None","title":"factorial"},{"location":"api_modules/mlxtend.math/num_combinations/","text":"num_combinations num_combinations(n, k, with_replacement=False) Function to calculate the number of possible combinations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool (default: False) Allows repeated elements if True. Returns comb : int Number of possible combinations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_combinations/","title":"Num combinations"},{"location":"api_modules/mlxtend.math/num_combinations/#num_combinations","text":"num_combinations(n, k, with_replacement=False) Function to calculate the number of possible combinations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool (default: False) Allows repeated elements if True. Returns comb : int Number of possible combinations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_combinations/","title":"num_combinations"},{"location":"api_modules/mlxtend.math/num_permutations/","text":"num_permutations num_permutations(n, k, with_replacement=False) Function to calculate the number of possible permutations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool Allows repeated elements if True. Returns permut : int Number of possible permutations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_permutations/","title":"Num permutations"},{"location":"api_modules/mlxtend.math/num_permutations/#num_permutations","text":"num_permutations(n, k, with_replacement=False) Function to calculate the number of possible permutations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool Allows repeated elements if True. Returns permut : int Number of possible permutations.
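A quick sanity check of the counting helpers documented above (the expected values follow from the standard binomial and falling-factorial formulas):

```python
# Verify num_combinations and num_permutations against known values.
from mlxtend.math import num_combinations, num_permutations

print(num_combinations(n=20, k=8))   # 125970  (20 choose 8)
print(num_permutations(n=20, k=8))   # 5079110400  (20! / 12!)
```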
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_permutations/","title":"num_permutations"},{"location":"api_modules/mlxtend.math/vectorspace_dimensionality/","text":"vectorspace_dimensionality vectorspace_dimensionality(ary) Computes the hyper-volume spanned by a vector set Parameters ary : array-like, shape=[num_vectors, num_vectors] An orthogonal set of vectors (arranged as columns in a matrix) Returns dimensions : int An integer indicating the \"dimensionality\" (hyper-volume) spanned by the vector set","title":"Vectorspace dimensionality"},{"location":"api_modules/mlxtend.math/vectorspace_dimensionality/#vectorspace_dimensionality","text":"vectorspace_dimensionality(ary) Computes the hyper-volume spanned by a vector set Parameters ary : array-like, shape=[num_vectors, num_vectors] An orthogonal set of vectors (arranged as columns in a matrix) Returns dimensions : int An integer indicating the \"dimensionality\" (hyper-volume) spanned by the vector set","title":"vectorspace_dimensionality"},{"location":"api_modules/mlxtend.math/vectorspace_orthonormalization/","text":"vectorspace_orthonormalization vectorspace_orthonormalization(ary, eps=1e-13) Transforms a set of column vectors to an orthonormal basis. Given a set of orthogonal vectors, this function converts such column vectors, arranged in a matrix, into orthonormal basis vectors. Parameters ary : array-like, shape=[num_vectors, num_vectors] An orthogonal set of vectors (arranged as columns in a matrix) eps : float (default: 1e-13) A small tolerance value to determine whether the vector norm is zero or not. Returns arr : array-like, shape=[num_vectors, num_vectors] An orthonormal set of vectors (arranged as columns)","title":"Vectorspace orthonormalization"},{"location":"api_modules/mlxtend.math/vectorspace_orthonormalization/#vectorspace_orthonormalization","text":"vectorspace_orthonormalization(ary, eps=1e-13) Transforms a set of column vectors to an orthonormal basis. Given a set of orthogonal vectors, this function converts such column vectors, arranged in a matrix, into orthonormal basis vectors. Parameters ary : array-like, shape=[num_vectors, num_vectors] An orthogonal set of vectors (arranged as columns in a matrix) eps : float (default: 1e-13) A small tolerance value to determine whether the vector norm is zero or not. Returns arr : array-like, shape=[num_vectors, num_vectors] An orthonormal set of vectors (arranged as columns)","title":"vectorspace_orthonormalization"},{"location":"api_modules/mlxtend.plotting/category_scatter/","text":"category_scatter category_scatter(x, y, label_col, data, markers='sxo^v', colors=('blue', 'green', 'red', 'purple', 'gray', 'cyan'), alpha=0.7, markersize=20.0, legend_loc='best') Scatter plot to plot categories in different colors/markerstyles. Parameters x : str or int DataFrame column name of the x-axis values or integer for the numpy ndarray column index. y : str or int DataFrame column name of the y-axis values or integer for the numpy ndarray column index data : Pandas DataFrame object or NumPy ndarray. markers : str Markers that are cycled through the label category. colors : tuple Colors that are cycled through the label category. alpha : float (default: 0.7) Parameter to control the transparency. markersize : float (default: 20.0) Parameter to control the marker size.
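A small sketch of vectorspace_orthonormalization as documented above (the input matrix is an illustrative pair of orthogonal, non-unit-length columns):

```python
# Normalize an orthogonal (but not orthonormal) set of column vectors.
import numpy as np
from mlxtend.math import vectorspace_orthonormalization

ary = np.array([[2., 0.],
                [0., 3.]])  # orthogonal columns with norms 2 and 3
print(vectorspace_orthonormalization(ary))  # -> 2x2 identity matrix
```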
legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/category_scatter/","title":"Category scatter"},{"location":"api_modules/mlxtend.plotting/category_scatter/#category_scatter","text":"category_scatter(x, y, label_col, data, markers='sxo^v', colors=('blue', 'green', 'red', 'purple', 'gray', 'cyan'), alpha=0.7, markersize=20.0, legend_loc='best') Scatter plot to plot categories in different colors/markerstyles. Parameters x : str or int DataFrame column name of the x-axis values or integer for the numpy ndarray column index. y : str or int DataFrame column name of the y-axis values or integer for the numpy ndarray column index data : Pandas DataFrame object or NumPy ndarray. markers : str Markers that are cycled through the label category. colors : tuple Colors that are cycled through the label category. alpha : float (default: 0.7) Parameter to control the transparency. markersize : float (default: 20.0) Parameter to control the marker size. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/category_scatter/","title":"category_scatter"},{"location":"api_modules/mlxtend.plotting/checkerboard_plot/","text":"checkerboard_plot checkerboard_plot(ary, cell_colors=('white', 'black'), font_colors=('black', 'white'), fmt='%.1f', figsize=None, row_labels=None, col_labels=None, fontsize=None) Plot a checkerboard table / heatmap via matplotlib. Parameters ary : array-like, shape = [n, m] A 2D NumPy array. cell_colors : tuple or list (default: ('white', 'black')) Tuple or list containing the two colors of the checkerboard pattern. font_colors : tuple or list (default: ('black', 'white')) Font colors corresponding to the cell colors. figsize : tuple (default: (2.5, 2.5)) Height and width of the figure fmt : str (default: '%.1f') Python string formatter for cell values. The default '%.1f' results in floats with 1 digit after the decimal point. Use '%d' to show numbers as integers. row_labels : list (default: None) List of the row labels. Uses the array row indices 0 to n by default. col_labels : list (default: None) List of the column labels. Uses the array column indices 0 to m by default. fontsize : int (default: None) Specifies the font size of the checkerboard table. Uses matplotlib's default if None. Returns fig : matplotlib Figure object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/checkerboard_plot/","title":"Checkerboard plot"},{"location":"api_modules/mlxtend.plotting/checkerboard_plot/#checkerboard_plot","text":"checkerboard_plot(ary, cell_colors=('white', 'black'), font_colors=('black', 'white'), fmt='%.1f', figsize=None, row_labels=None, col_labels=None, fontsize=None) Plot a checkerboard table / heatmap via matplotlib. Parameters ary : array-like, shape = [n, m] A 2D NumPy array. cell_colors : tuple or list (default: ('white', 'black')) Tuple or list containing the two colors of the checkerboard pattern. font_colors : tuple or list (default: ('black', 'white')) Font colors corresponding to the cell colors.
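A minimal category_scatter sketch to complement the reference above (the DataFrame values and labels are made up for the example):

```python
# Scatter plot with per-label colors/markers from a small DataFrame.
import pandas as pd
import matplotlib.pyplot as plt
from mlxtend.plotting import category_scatter

df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6],
                   'y': [2, 1, 3, 5, 4, 6],
                   'label': ['a', 'a', 'a', 'b', 'b', 'b']})

fig = category_scatter(x='x', y='y', label_col='label', data=df)
plt.show()
```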
figsize : tuple (default: (2.5, 2.5)) Height and width of the figure fmt : str (default: '%.1f') Python string formatter for cell values. The default '%.1f' results in floats with 1 digit after the decimal point. Use '%d' to show numbers as integers. row_labels : list (default: None) List of the row labels. Uses the array row indices 0 to n by default. col_labels : list (default: None) List of the column labels. Uses the array column indices 0 to m by default. fontsize : int (default: None) Specifies the font size of the checkerboard table. Uses matplotlib's default if None. Returns fig : matplotlib Figure object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/checkerboard_plot/","title":"checkerboard_plot"},{"location":"api_modules/mlxtend.plotting/ecdf/","text":"ecdf ecdf(x, y_label='ECDF', x_label=None, ax=None, percentile=None, ecdf_color=None, ecdf_marker='o', percentile_color='black', percentile_linestyle='--') Plots an Empirical Cumulative Distribution Function Parameters x : array or list, shape=[n_samples,] Array-like object containing the feature values y_label : str (default='ECDF') Text label for the y-axis x_label : str (default=None) Text label for the x-axis ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None percentile : float (default=None) Float between 0 and 1 for plotting a percentile threshold line ecdf_color : matplotlib color (default=None) Color for the ECDF plot; uses matplotlib defaults if None ecdf_marker : matplotlib marker (default='o') Marker style for the ECDF plot percentile_color : matplotlib color (default='black') Color for the percentile threshold if percentile is not None percentile_linestyle : matplotlib linestyle (default='--') Line style for the percentile threshold if percentile is not None Returns ax : matplotlib.axes.Axes object percentile_threshold : float Feature threshold at the percentile or None if percentile=None percentile_count : int or None Number of samples with a feature value less than or equal to the feature threshold at the percentile, or None if percentile=None Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/ecdf/","title":"Ecdf"},{"location":"api_modules/mlxtend.plotting/ecdf/#ecdf","text":"ecdf(x, y_label='ECDF', x_label=None, ax=None, percentile=None, ecdf_color=None, ecdf_marker='o', percentile_color='black', percentile_linestyle='--') Plots an Empirical Cumulative Distribution Function Parameters x : array or list, shape=[n_samples,] Array-like object containing the feature values y_label : str (default='ECDF') Text label for the y-axis x_label : str (default=None) Text label for the x-axis ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes.
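To illustrate the checkerboard_plot reference above, a minimal sketch (the array values and labels are arbitrary):

```python
# Render a small 2D array as a labeled checkerboard table.
import numpy as np
import matplotlib.pyplot as plt
from mlxtend.plotting import checkerboard_plot

ary = np.random.random((3, 4))
checkerboard_plot(ary,
                  row_labels=['r1', 'r2', 'r3'],
                  col_labels=['c1', 'c2', 'c3', 'c4'],
                  fmt='%.2f')
plt.show()
```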
Creates one if ax=None percentile : float (default=None) Float between 0 and 1 for plotting a percentile threshold line ecdf_color : matplotlib color (default=None) Color for the ECDF plot; uses matplotlib defaults if None ecdf_marker : matplotlib marker (default='o') Marker style for the ECDF plot percentile_color : matplotlib color (default='black') Color for the percentile threshold if percentile is not None percentile_linestyle : matplotlib linestyle (default='--') Line style for the percentile threshold if percentile is not None Returns ax : matplotlib.axes.Axes object percentile_threshold : float Feature threshold at the percentile or None if percentile=None percentile_count : int or None Number of samples with a feature value less than or equal to the feature threshold at the percentile, or None if percentile=None Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/ecdf/","title":"ecdf"},{"location":"api_modules/mlxtend.plotting/enrichment_plot/","text":"enrichment_plot enrichment_plot(df, colors='bgrkcy', markers=' ', linestyles='-', alpha=0.5, lw=2, where='post', grid=True, count_label='Count', xlim='auto', ylim='auto', invert_axes=False, legend_loc='best', ax=None) Plot an enrichment plot Parameters df : pandas.DataFrame A pandas DataFrame where columns represent the different categories. colors : str (default: 'bgrkcy') The colors of the bars. markers : str (default: ' ') Matplotlib markerstyles, e.g., 'sov' for square, circle, and triangle markers. linestyles : str (default: '-') Matplotlib linestyles, e.g., '-,--' to cycle normal and dashed lines. Note that the different linestyles need to be separated by commas. alpha : float (default: 0.5) Transparency level from 0.0 to 1.0. lw : int or float (default: 2) Linewidth parameter. where : {'post', 'pre', 'mid'} (default: 'post') Starting location of the steps. grid : bool (default: True ) Plots a grid if True. count_label : str (default: 'Count') Label for the \"Count\"-axis. xlim : 'auto' or array-like [min, max] (default: 'auto') Min and maximum position of the x-axis range. ylim : 'auto' or array-like [min, max] (default: 'auto') Min and maximum position of the y-axis range. invert_axes : bool (default: False) Plots count on the x-axis if True. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False ax : matplotlib axis, optional (default: None) Use this axis for plotting or make a new one otherwise Returns ax : matplotlib axis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/enrichment_plot/","title":"Enrichment plot"},{"location":"api_modules/mlxtend.plotting/enrichment_plot/#enrichment_plot","text":"enrichment_plot(df, colors='bgrkcy', markers=' ', linestyles='-', alpha=0.5, lw=2, where='post', grid=True, count_label='Count', xlim='auto', ylim='auto', invert_axes=False, legend_loc='best', ax=None) Plot an enrichment plot Parameters df : pandas.DataFrame A pandas DataFrame where columns represent the different categories. colors : str (default: 'bgrkcy') The colors of the bars. markers : str (default: ' ') Matplotlib markerstyles, e.g., 'sov' for square, circle, and triangle markers. linestyles : str (default: '-') Matplotlib linestyles, e.g., '-,--' to cycle normal and dashed lines. Note that the different linestyles need to be separated by commas. alpha : float (default: 0.5) Transparency level from 0.0 to 1.0.
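A minimal ecdf sketch to complement the reference above (the feature choice and percentile are illustrative):

```python
# ECDF of the first Iris feature with an 80th-percentile threshold.
import matplotlib.pyplot as plt
from mlxtend.data import iris_data
from mlxtend.plotting import ecdf

X, y = iris_data()
ax, threshold, count = ecdf(x=X[:, 0],
                            x_label='sepal length (cm)',
                            percentile=0.8)
print('threshold:', threshold, '| samples at or below:', count)
plt.show()
```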
lw : int or float (default: 2) Linewidth parameter. where : {'post', 'pre', 'mid'} (default: 'post') Starting location of the steps. grid : bool (default: True ) Plots a grid if True. count_label : str (default: 'Count') Label for the \"Count\"-axis. xlim : 'auto' or array-like [min, max] (default: 'auto') Min and maximum position of the x-axis range. ylim : 'auto' or array-like [min, max] (default: 'auto') Min and maximum position of the y-axis range. invert_axes : bool (default: False) Plots count on the x-axis if True. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False ax : matplotlib axis, optional (default: None) Use this axis for plotting or make a new one otherwise Returns ax : matplotlib axis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/enrichment_plot/","title":"enrichment_plot"},{"location":"api_modules/mlxtend.plotting/plot_confusion_matrix/","text":"plot_confusion_matrix plot_confusion_matrix(conf_mat, hide_spines=False, hide_ticks=False, figsize=None, cmap=None, colorbar=False, show_absolute=True, show_normed=False) Plot a confusion matrix via matplotlib. Parameters conf_mat : array-like, shape = [n_classes, n_classes] Confusion matrix from evaluate.confusion_matrix. hide_spines : bool (default: False) Hides axis spines if True. hide_ticks : bool (default: False) Hides axis ticks if True figsize : tuple (default: (2.5, 2.5)) Height and width of the figure cmap : matplotlib colormap (default: None ) Uses matplotlib.pyplot.cm.Blues if None colorbar : bool (default: False) Shows a colorbar if True show_absolute : bool (default: True) Shows absolute confusion matrix coefficients if True. At least one of show_absolute or show_normed must be True. show_normed : bool (default: False) Shows normed confusion matrix coefficients if True. The normed confusion matrix coefficients give the proportion of training examples per class that are assigned the correct label. At least one of show_absolute or show_normed must be True. Returns fig, ax : matplotlib.pyplot subplot objects Figure and axis elements of the subplot. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_confusion_matrix/","title":"Plot confusion matrix"},{"location":"api_modules/mlxtend.plotting/plot_confusion_matrix/#plot_confusion_matrix","text":"plot_confusion_matrix(conf_mat, hide_spines=False, hide_ticks=False, figsize=None, cmap=None, colorbar=False, show_absolute=True, show_normed=False) Plot a confusion matrix via matplotlib. Parameters conf_mat : array-like, shape = [n_classes, n_classes] Confusion matrix from evaluate.confusion_matrix. hide_spines : bool (default: False) Hides axis spines if True. hide_ticks : bool (default: False) Hides axis ticks if True figsize : tuple (default: (2.5, 2.5)) Height and width of the figure cmap : matplotlib colormap (default: None ) Uses matplotlib.pyplot.cm.Blues if None colorbar : bool (default: False) Shows a colorbar if True show_absolute : bool (default: True) Shows absolute confusion matrix coefficients if True. At least one of show_absolute or show_normed must be True. show_normed : bool (default: False) Shows normed confusion matrix coefficients if True. The normed confusion matrix coefficients give the proportion of training examples per class that are assigned the correct label. At least one of show_absolute or show_normed must be True.
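A small enrichment_plot sketch (the two value columns below are made up for illustration):

```python
# Enrichment plot over two value columns of a small DataFrame.
import pandas as pd
import matplotlib.pyplot as plt
from mlxtend.plotting import enrichment_plot

df = pd.DataFrame({'sample A': [1.1, 2.0, 2.5, 3.3],
                   'sample B': [0.9, 1.8, 2.2, 4.0]})
ax = enrichment_plot(df, legend_loc='upper left')
plt.show()
```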
Returns fig, ax : matplotlib.pyplot subplot objects Figure and axis elements of the subplot. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_confusion_matrix/\",\"title\":\"plot_confusion_matrix\"},{\"location\":\"api_modules/mlxtend.plotting/plot_decision_regions/\",\"text\":\"plot_decision_regions plot_decision_regions(X, y, clf, feature_index=None, filler_feature_values=None, filler_feature_ranges=None, ax=None, X_highlight=None, res=None, legend=1, hide_spines=True, markers='s^oxv<>', colors='#1f77b4,#ff7f0e,#3ca02c,#d62728,#9467bd,#8c564b,#e377c2,#7f7f7f,#bcbd22,#17becf', scatter_kwargs=None, contourf_kwargs=None, scatter_highlight_kwargs=None) Plot decision regions of a classifier. Please note that this function assumes that class labels are labeled consecutively, e.g., 0, 1, 2, 3, 4, and 5. If you have class labels with integer labels > 4, you may want to provide additional colors and/or markers as colors and markers arguments. See http://matplotlib.org/examples/color/named_colors.html for more information. Parameters X : array-like, shape = [n_samples, n_features] Feature Matrix. y : array-like, shape = [n_samples] True class labels. clf : Classifier object. Must have a .predict method. feature_index : array-like (default: (0,) for 1D, (0, 1) otherwise) Feature indices to use for plotting. The first index in feature_index will be on the x-axis, the second index will be on the y-axis. filler_feature_values : dict (default: None) Only needed if the number of features > 2. Dictionary of feature index-value pairs for the features not being plotted. filler_feature_ranges : dict (default: None) Only needed if the number of features > 2. Dictionary of feature index-value pairs for the features not being plotted. Will use the ranges provided to select training samples for plotting. ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None. X_highlight : array-like, shape = [n_samples, n_features] (default: None) An array with data points that are used to highlight samples in X . res : float or array-like, shape = (2,) (default: None) This parameter was used to define the grid width, but it has been deprecated in favor of determining the number of points given the figure DPI and size automatically for optimal results and computational efficiency. To increase the resolution, it is recommended to provide a dpi argument via matplotlib, e.g., plt.figure(dpi=600). hide_spines : bool (default: True) Hide axis spines if True. legend : int (default: 1) Integer to specify the legend location. No legend if legend is 0. markers : str (default: 's^oxv<>') Scatterplot markers. colors : str (default: '#1f77b4,#ff7f0e,#3ca02c,#d62728,#9467bd,#8c564b,#e377c2,#7f7f7f,#bcbd22,#17becf') Comma separated list of colors. scatter_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function. contourf_kwargs : dict (default: None) Keyword arguments for underlying matplotlib contourf function. scatter_highlight_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function.
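Before the plot_decision_regions entry continues below, a short sketch of plot_confusion_matrix as documented above; the 2x2 matrix is invented for illustration:

```python
import numpy as np
import matplotlib.pyplot as plt
from mlxtend.plotting import plot_confusion_matrix

cm = np.array([[13, 2],
               [3, 12]])  # hypothetical binary confusion matrix
fig, ax = plot_confusion_matrix(conf_mat=cm, show_absolute=True, show_normed=True)
plt.show()
```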
Returns ax : matplotlib.axes.Axes object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_decision_regions/\",\"title\":\"Plot decision regions\"},{\"location\":\"api_modules/mlxtend.plotting/plot_decision_regions/#plot_decision_regions\",\"text\":\"plot_decision_regions(X, y, clf, feature_index=None, filler_feature_values=None, filler_feature_ranges=None, ax=None, X_highlight=None, res=None, legend=1, hide_spines=True, markers='s^oxv<>', colors='#1f77b4,#ff7f0e,#3ca02c,#d62728,#9467bd,#8c564b,#e377c2,#7f7f7f,#bcbd22,#17becf', scatter_kwargs=None, contourf_kwargs=None, scatter_highlight_kwargs=None) Plot decision regions of a classifier. Please note that this function assumes that class labels are labeled consecutively, e.g., 0, 1, 2, 3, 4, and 5. If you have class labels with integer labels > 4, you may want to provide additional colors and/or markers as colors and markers arguments. See http://matplotlib.org/examples/color/named_colors.html for more information. Parameters X : array-like, shape = [n_samples, n_features] Feature Matrix. y : array-like, shape = [n_samples] True class labels. clf : Classifier object. Must have a .predict method. feature_index : array-like (default: (0,) for 1D, (0, 1) otherwise) Feature indices to use for plotting. The first index in feature_index will be on the x-axis, the second index will be on the y-axis. filler_feature_values : dict (default: None) Only needed if the number of features > 2. Dictionary of feature index-value pairs for the features not being plotted. filler_feature_ranges : dict (default: None) Only needed if the number of features > 2. Dictionary of feature index-value pairs for the features not being plotted. Will use the ranges provided to select training samples for plotting. ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None. X_highlight : array-like, shape = [n_samples, n_features] (default: None) An array with data points that are used to highlight samples in X . res : float or array-like, shape = (2,) (default: None) This parameter was used to define the grid width, but it has been deprecated in favor of determining the number of points given the figure DPI and size automatically for optimal results and computational efficiency. To increase the resolution, it is recommended to provide a dpi argument via matplotlib, e.g., plt.figure(dpi=600). hide_spines : bool (default: True) Hide axis spines if True. legend : int (default: 1) Integer to specify the legend location. No legend if legend is 0. markers : str (default: 's^oxv<>') Scatterplot markers. colors : str (default: '#1f77b4,#ff7f0e,#3ca02c,#d62728,#9467bd,#8c564b,#e377c2,#7f7f7f,#bcbd22,#17becf') Comma separated list of colors. scatter_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function. contourf_kwargs : dict (default: None) Keyword arguments for underlying matplotlib contourf function. scatter_highlight_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function.
Returns ax : matplotlib.axes.Axes object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_decision_regions/\",\"title\":\"plot_decision_regions\"},{\"location\":\"api_modules/mlxtend.plotting/plot_learning_curves/\",\"text\":\"plot_learning_curves plot_learning_curves(X_train, y_train, X_test, y_test, clf, train_marker='o', test_marker='^', scoring='misclassification error', suppress_plot=False, print_model=True, style='fivethirtyeight', legend_loc='best') Plots learning curves of a classifier. Parameters X_train : array-like, shape = [n_samples, n_features] Feature matrix of the training dataset. y_train : array-like, shape = [n_samples] True class labels of the training dataset. X_test : array-like, shape = [n_samples, n_features] Feature matrix of the test dataset. y_test : array-like, shape = [n_samples] True class labels of the test dataset. clf : Classifier object. Must have .fit and .predict methods. train_marker : str (default: 'o') Marker for the training set line plot. test_marker : str (default: '^') Marker for the test set line plot. scoring : str (default: 'misclassification error') If not 'misclassification error', accepts the following metrics (from scikit-learn): {'accuracy', 'average_precision', 'f1_micro', 'f1_macro', 'f1_weighted', 'f1_samples', 'log_loss', 'precision', 'recall', 'roc_auc', 'adjusted_rand_score', 'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} suppress_plot : bool (default: False) Suppress matplotlib plots if True. Recommended for testing purposes. print_model : bool (default: True) Print model parameters in plot title if True. style : str (default: 'fivethirtyeight') Matplotlib style. legend_loc : str (default: 'best') Where to place the plot legend: {'best', 'upper left', 'upper right', 'lower left', 'lower right'} Returns errors : (training_error, test_error): tuple of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_learning_curves/\",\"title\":\"Plot learning curves\"},{\"location\":\"api_modules/mlxtend.plotting/plot_learning_curves/#plot_learning_curves\",\"text\":\"plot_learning_curves(X_train, y_train, X_test, y_test, clf, train_marker='o', test_marker='^', scoring='misclassification error', suppress_plot=False, print_model=True, style='fivethirtyeight', legend_loc='best') Plots learning curves of a classifier. Parameters X_train : array-like, shape = [n_samples, n_features] Feature matrix of the training dataset. y_train : array-like, shape = [n_samples] True class labels of the training dataset. X_test : array-like, shape = [n_samples, n_features] Feature matrix of the test dataset. y_test : array-like, shape = [n_samples] True class labels of the test dataset. clf : Classifier object. Must have .fit and .predict methods. train_marker : str (default: 'o') Marker for the training set line plot. test_marker : str (default: '^') Marker for the test set line plot. scoring : str (default: 'misclassification error') If not 'misclassification error', accepts the following metrics (from scikit-learn): {'accuracy', 'average_precision', 'f1_micro', 'f1_macro', 'f1_weighted', 'f1_samples', 'log_loss', 'precision', 'recall', 'roc_auc', 'adjusted_rand_score', 'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} suppress_plot : bool (default: False) Suppress matplotlib plots if True. Recommended for testing purposes. print_model : bool (default: True) Print model parameters in plot title if True.
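To make the plot_decision_regions parameters above concrete, a minimal sketch assuming scikit-learn is available; the choice of feature columns 0 and 2 is arbitrary:

```python
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from mlxtend.data import iris_data
from mlxtend.plotting import plot_decision_regions

X, y = iris_data()
X = X[:, [0, 2]]  # restrict to two features so the regions can be drawn in 2D
clf = LogisticRegression(solver='liblinear', multi_class='auto').fit(X, y)
ax = plot_decision_regions(X, y, clf=clf, legend=2)
plt.show()
```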
style : str (default: 'fivethirtyeight') Matplotlib style. legend_loc : str (default: 'best') Where to place the plot legend: {'best', 'upper left', 'upper right', 'lower left', 'lower right'} Returns errors : (training_error, test_error): tuple of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_learning_curves/\",\"title\":\"plot_learning_curves\"},{\"location\":\"api_modules/mlxtend.plotting/plot_linear_regression/\",\"text\":\"plot_linear_regression plot_linear_regression(X, y, model=LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False), corr_func='pearsonr', scattercolor='blue', fit_style='k--', legend=True, xlim='auto') Plot a linear regression line fit. Parameters X : numpy array, shape = [n_samples,] Samples. y : numpy array, shape (n_samples,) Target values. model: object (default: sklearn.linear_model.LinearRegression) Estimator object for regression. Must implement a .fit() and .predict() method. corr_func: str or function (default: 'pearsonr') Uses pearsonr from scipy.stats to compute the regression slope if corr_func='pearsonr'. If not 'pearsonr', the corr_func parameter expects a function of the form func(x, y) that returns a tuple whose first element is the correlation coefficient. scattercolor: string (default: blue) Color of scatter plot points. fit_style: string (default: k--) Style for the line fit. legend: bool (default: True) Plots a legend with the correlation coefficient, fit coefficient, and intercept values. xlim: array-like (x_min, x_max) or 'auto' (default: 'auto') X-axis limits for the linear fit. Returns regression_fit : tuple intercept, slope, corr_coeff (float, float, float) Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_linear_regression/\",\"title\":\"Plot linear regression\"},{\"location\":\"api_modules/mlxtend.plotting/plot_linear_regression/#plot_linear_regression\",\"text\":\"plot_linear_regression(X, y, model=LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False), corr_func='pearsonr', scattercolor='blue', fit_style='k--', legend=True, xlim='auto') Plot a linear regression line fit. Parameters X : numpy array, shape = [n_samples,] Samples. y : numpy array, shape (n_samples,) Target values. model: object (default: sklearn.linear_model.LinearRegression) Estimator object for regression. Must implement a .fit() and .predict() method. corr_func: str or function (default: 'pearsonr') Uses pearsonr from scipy.stats to compute the regression slope if corr_func='pearsonr'. If not 'pearsonr', the corr_func parameter expects a function of the form func(x, y) that returns a tuple whose first element is the correlation coefficient. scattercolor: string (default: blue) Color of scatter plot points. fit_style: string (default: k--) Style for the line fit. legend: bool (default: True) Plots a legend with the correlation coefficient, fit coefficient, and intercept values. xlim: array-like (x_min, x_max) or 'auto' (default: 'auto') X-axis limits for the linear fit.
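A hedged usage sketch for plot_learning_curves as documented above, assuming scikit-learn for the splitter and classifier:

```python
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from mlxtend.data import iris_data
from mlxtend.plotting import plot_learning_curves

X, y = iris_data()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
# misclassification error vs. training set size for train and test sets
plot_learning_curves(X_train, y_train, X_test, y_test, clf=KNeighborsClassifier())
plt.show()
```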
Returns regression_fit : tuple intercept, slope, corr_coeff (float, float, float) Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_linear_regression/","title":"plot_linear_regression"},{"location":"api_modules/mlxtend.plotting/plot_sequential_feature_selection/","text":"plot_sequential_feature_selection plot_sequential_feature_selection(metric_dict, kind='std_dev', color='blue', bcolor='steelblue', marker='o', alpha=0.2, ylabel='Performance', confidence_interval=0.95) Plot feature selection results. Parameters metric_dict : mlxtend.SequentialFeatureSelector.get_metric_dict() object kind : str (default: \"std_dev\") The kind of error bar or confidence interval in {'std_dev', 'std_err', 'ci', None}. color : str (default: \"blue\") Color of the lineplot (accepts any matplotlib color name) bcolor : str (default: \"steelblue\"). Color of the error bars / confidence intervals (accepts any matplotlib color name). marker : str (default: \"o\") Marker of the line plot (accepts any matplotlib marker name). alpha : float in [0, 1] (default: 0.2) Transparency of the error bars / confidence intervals. ylabel : str (default: \"Performance\") Y-axis label. confidence_interval : float (default: 0.95) Confidence level if kind='ci' . Returns fig : matplotlib.pyplot.figure() object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_sequential_feature_selection/","title":"Plot sequential feature selection"},{"location":"api_modules/mlxtend.plotting/plot_sequential_feature_selection/#plot_sequential_feature_selection","text":"plot_sequential_feature_selection(metric_dict, kind='std_dev', color='blue', bcolor='steelblue', marker='o', alpha=0.2, ylabel='Performance', confidence_interval=0.95) Plot feature selection results. Parameters metric_dict : mlxtend.SequentialFeatureSelector.get_metric_dict() object kind : str (default: \"std_dev\") The kind of error bar or confidence interval in {'std_dev', 'std_err', 'ci', None}. color : str (default: \"blue\") Color of the lineplot (accepts any matplotlib color name) bcolor : str (default: \"steelblue\"). Color of the error bars / confidence intervals (accepts any matplotlib color name). marker : str (default: \"o\") Marker of the line plot (accepts any matplotlib marker name). alpha : float in [0, 1] (default: 0.2) Transparency of the error bars / confidence intervals. ylabel : str (default: \"Performance\") Y-axis label. confidence_interval : float (default: 0.95) Confidence level if kind='ci' . Returns fig : matplotlib.pyplot.figure() object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_sequential_feature_selection/","title":"plot_sequential_feature_selection"},{"location":"api_modules/mlxtend.plotting/remove_borders/","text":"remove_borders remove_borders(axes, left=False, bottom=False, right=True, top=True) Remove chart junk from matplotlib plots. Parameters axes : iterable An iterable containing plt.gca() or plt.subplot() objects, e.g. [plt.gca()]. left : bool (default: False ) Hide left axis spine if True. bottom : bool (default: False ) Hide bottom axis spine if True. right : bool (default: True ) Hide right axis spine if True. top : bool (default: True ) Hide top axis spine if True. 
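The plot_linear_regression entry above returns (intercept, slope, corr_coeff); a minimal sketch with made-up data:

```python
import numpy as np
import matplotlib.pyplot as plt
from mlxtend.plotting import plot_linear_regression

X = np.array([4, 8, 13, 26, 31, 10, 8, 30, 18, 12, 20, 5, 28, 18, 6])
y = np.array([14, 24, 22, 59, 66, 25, 18, 60, 39, 32, 51, 14, 58, 40, 14])
intercept, slope, corr_coeff = plot_linear_regression(X, y)
plt.show()
```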
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/remove_chartjunk/\",\"title\":\"Remove borders\"},{\"location\":\"api_modules/mlxtend.plotting/remove_borders/#remove_borders\",\"text\":\"remove_borders(axes, left=False, bottom=False, right=True, top=True) Remove chart junk from matplotlib plots. Parameters axes : iterable An iterable containing plt.gca() or plt.subplot() objects, e.g. [plt.gca()]. left : bool (default: False ) Hide left axis spine if True. bottom : bool (default: False ) Hide bottom axis spine if True. right : bool (default: True ) Hide right axis spine if True. top : bool (default: True ) Hide top axis spine if True. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/remove_chartjunk/\",\"title\":\"remove_borders\"},{\"location\":\"api_modules/mlxtend.plotting/scatterplotmatrix/\",\"text\":\"scatterplotmatrix scatterplotmatrix(X, fig_axes=None, names=None, figsize=(8, 8), alpha=1.0, **kwargs) Lower triangle of a scatterplot matrix. Parameters X : array-like, shape={num_examples, num_features} Design matrix containing data instances (examples) with multiple exploratory variables (features). fig_axes : tuple (default: None) A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...) names : list (default: None) A list of string names, which should have the same number of elements as there are features (columns) in X . figsize : tuple (default: (8, 8)) Height and width of the subplot grid. Ignored if fig_axes is not None . alpha : float (default: 1.0) Transparency for both the scatter plots and the histograms along the diagonal. **kwargs : kwargs Keyword arguments for the scatterplots. Returns fig_axes : tuple A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...)\",\"title\":\"Scatterplotmatrix\"},{\"location\":\"api_modules/mlxtend.plotting/scatterplotmatrix/#scatterplotmatrix\",\"text\":\"scatterplotmatrix(X, fig_axes=None, names=None, figsize=(8, 8), alpha=1.0, **kwargs) Lower triangle of a scatterplot matrix. Parameters X : array-like, shape={num_examples, num_features} Design matrix containing data instances (examples) with multiple exploratory variables (features). fig_axes : tuple (default: None) A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...) names : list (default: None) A list of string names, which should have the same number of elements as there are features (columns) in X . figsize : tuple (default: (8, 8)) Height and width of the subplot grid. Ignored if fig_axes is not None . alpha : float (default: 1.0) Transparency for both the scatter plots and the histograms along the diagonal. **kwargs : kwargs Keyword arguments for the scatterplots.
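remove_borders, documented above, operates on an iterable of axes; a short sketch:

```python
import matplotlib.pyplot as plt
from mlxtend.plotting import remove_borders

fig, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 4, 9])
remove_borders([ax], left=False, bottom=False, right=True, top=True)  # drop top/right spines
plt.show()
```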
Returns fig_axes : tuple A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...)\",\"title\":\"scatterplotmatrix\"},{\"location\":\"api_modules/mlxtend.plotting/stacked_barplot/\",\"text\":\"stacked_barplot stacked_barplot(df, bar_width='auto', colors='bgrcky', labels='index', rotation=90, legend_loc='best') Function to plot stacked barplots Parameters df : pandas.DataFrame A pandas DataFrame where the index denotes the x-axis labels, and the columns contain the different measurements for each row. bar_width: 'auto' or float (default: 'auto') Parameter to set the widths of the bars. If 'auto', the width is automatically determined by the number of columns in the dataset. colors: str (default: 'bgrcky') The colors of the bars. labels: 'index' or iterable (default: 'index') If 'index', the DataFrame index will be used as x-tick labels. rotation: int (default: 90) Parameter to rotate the x-axis labels. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/stacked_barplot/\",\"title\":\"Stacked barplot\"},{\"location\":\"api_modules/mlxtend.plotting/stacked_barplot/#stacked_barplot\",\"text\":\"stacked_barplot(df, bar_width='auto', colors='bgrcky', labels='index', rotation=90, legend_loc='best') Function to plot stacked barplots Parameters df : pandas.DataFrame A pandas DataFrame where the index denotes the x-axis labels, and the columns contain the different measurements for each row. bar_width: 'auto' or float (default: 'auto') Parameter to set the widths of the bars. If 'auto', the width is automatically determined by the number of columns in the dataset. colors: str (default: 'bgrcky') The colors of the bars. labels: 'index' or iterable (default: 'index') If 'index', the DataFrame index will be used as x-tick labels. rotation: int (default: 90) Parameter to rotate the x-axis labels. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/stacked_barplot/\",\"title\":\"stacked_barplot\"},{\"location\":\"api_modules/mlxtend.preprocessing/CopyTransformer/\",\"text\":\"CopyTransformer CopyTransformer() Transformer that returns a copy of the input array For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/CopyTransformer/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array. get_params(deep=True) Get parameters for this estimator.
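A hedged sketch for stacked_barplot as documented above; the DataFrame contents are invented:

```python
import pandas as pd
import matplotlib.pyplot as plt
from mlxtend.plotting import stacked_barplot

df = pd.DataFrame([[1.0, 2.0, 3.0, 4.0],
                   [1.4, 2.1, 2.9, 5.1],
                   [1.9, 2.2, 3.5, 4.1]],
                  columns=['X1', 'X2', 'X3', 'X4'],
                  index=['Sample1', 'Sample2', 'Sample3'])  # index -> x-tick labels
fig = stacked_barplot(df, rotation=45, legend_loc='best')
plt.show()
```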
Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array.","title":"CopyTransformer"},{"location":"api_modules/mlxtend.preprocessing/CopyTransformer/#copytransformer","text":"CopyTransformer() Transformer that returns a copy of the input array For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/CopyTransformer/","title":"CopyTransformer"},{"location":"api_modules/mlxtend.preprocessing/CopyTransformer/#methods","text":"fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array.","title":"Methods"},{"location":"api_modules/mlxtend.preprocessing/DenseTransformer/","text":"DenseTransformer DenseTransformer(return_copy=True) Convert a sparse array into a dense array. For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/DenseTransformer/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. 
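Since CopyTransformer's transform, documented above, simply copies its input, it is mainly useful as a placeholder step in a scikit-learn pipeline; a minimal sketch:

```python
import numpy as np
from mlxtend.preprocessing import CopyTransformer

X = np.array([[1, 2], [3, 4]])
X_copy = CopyTransformer().fit_transform(X)
print((X_copy == X).all())  # True: same values, returned as a copy
```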
y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array.","title":"DenseTransformer"},{"location":"api_modules/mlxtend.preprocessing/DenseTransformer/#densetransformer","text":"DenseTransformer(return_copy=True) Convert a sparse array into a dense array. For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/DenseTransformer/","title":"DenseTransformer"},{"location":"api_modules/mlxtend.preprocessing/DenseTransformer/#methods","text":"fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array.","title":"Methods"},{"location":"api_modules/mlxtend.preprocessing/MeanCenterer/","text":"MeanCenterer MeanCenterer() Column centering of vectors and matrices. Attributes col_means : numpy.ndarray [n_columns] NumPy array storing the mean values for centering after fitting the MeanCenterer object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/MeanCenterer/ Methods fit(X) Gets the column means for mean centering. 
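DenseTransformer, documented above, is handy between a sparse vectorizer and an estimator that requires dense input; a hedged sketch using SciPy:

```python
from scipy.sparse import csr_matrix
from mlxtend.preprocessing import DenseTransformer

X_sparse = csr_matrix([[0, 1], [1, 0]])
X_dense = DenseTransformer().fit_transform(X_sparse)
print(X_dense)  # a regular dense NumPy array
```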
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns self fit_transform(X) Fits and transforms an array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered. transform(X) Centers a NumPy array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered.\",\"title\":\"MeanCenterer\"},{\"location\":\"api_modules/mlxtend.preprocessing/MeanCenterer/#meancenterer\",\"text\":\"MeanCenterer() Column centering of vectors and matrices. Attributes col_means : numpy.ndarray [n_columns] NumPy array storing the mean values for centering after fitting the MeanCenterer object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/MeanCenterer/\",\"title\":\"MeanCenterer\"},{\"location\":\"api_modules/mlxtend.preprocessing/MeanCenterer/#methods\",\"text\":\"fit(X) Gets the column means for mean centering. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns self fit_transform(X) Fits and transforms an array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered. transform(X) Centers a NumPy array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered.\",\"title\":\"Methods\"},{\"location\":\"api_modules/mlxtend.preprocessing/OnehotTransactions/\",\"text\":\"OnehotTransactions OnehotTransactions(*args, **kwargs) Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/ Methods fit(X) Learn unique column names from the transaction data Parameters X : list of lists A Python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators.
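A minimal sketch of the MeanCenterer fit/transform cycle documented above:

```python
import numpy as np
from mlxtend.preprocessing import MeanCenterer

X = np.array([[1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0],
              [7.0, 8.0, 9.0]])
mc = MeanCenterer().fit(X)  # column means are stored in mc.col_means
print(mc.transform(X))      # every column now has zero mean
```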
Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions. Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. 
Exact representation depends on the sparse argument For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice']","title":"OnehotTransactions"},{"location":"api_modules/mlxtend.preprocessing/OnehotTransactions/#onehottransactions","text":"OnehotTransactions( args, * kwargs) Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/","title":"OnehotTransactions"},{"location":"api_modules/mlxtend.preprocessing/OnehotTransactions/#methods","text":"fit(X) Learn unique column names from transaction DataFrame Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions. Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. 
Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. Exact representation depends on the sparse argument For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice']","title":"Methods"},{"location":"api_modules/mlxtend.preprocessing/TransactionEncoder/","text":"TransactionEncoder TransactionEncoder() Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/ Methods fit(X) Learn unique column names from transaction DataFrame Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions. Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. 
For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. Exact representation depends on the sparse argument For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice']","title":"TransactionEncoder"},{"location":"api_modules/mlxtend.preprocessing/TransactionEncoder/#transactionencoder","text":"TransactionEncoder() Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/","title":"TransactionEncoder"},{"location":"api_modules/mlxtend.preprocessing/TransactionEncoder/#methods","text":"fit(X) Learn unique column names from transaction DataFrame Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions. 
Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. Exact representation depends on the sparse argument For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice']","title":"Methods"},{"location":"api_modules/mlxtend.preprocessing/minmax_scaling/","text":"minmax_scaling minmax_scaling(array, columns, min_val=0, max_val=1) Min max scaling of pandas' DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] min_val : int or float , optional (default= 0 ) minimum value after rescaling. max_val : int or float , optional (default= 1 ) maximum value after rescaling. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with rescaled columns. 
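Tying the TransactionEncoder methods above together, a short sketch that round-trips a toy transaction list (the items are illustrative):

```python
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder

transactions = [['Apple', 'Beer', 'Rice'],
                ['Apple', 'Beer'],
                ['Milk', 'Beer']]
te = TransactionEncoder()
te_ary = te.fit(transactions).transform(transactions)
df = pd.DataFrame(te_ary, columns=te.columns_)  # boolean one-hot DataFrame
print(te.inverse_transform(te_ary))             # back to the item lists
```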
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/minmax_scaling/","title":"Minmax scaling"},{"location":"api_modules/mlxtend.preprocessing/minmax_scaling/#minmax_scaling","text":"minmax_scaling(array, columns, min_val=0, max_val=1) Min max scaling of pandas' DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] min_val : int or float , optional (default= 0 ) minimum value after rescaling. max_val : int or float , optional (default= 1 ) maximum value after rescaling. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with rescaled columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/minmax_scaling/","title":"minmax_scaling"},{"location":"api_modules/mlxtend.preprocessing/one_hot/","text":"one_hot one_hot(y, num_labels='auto', dtype='float') One-hot encoding of class labels Parameters y : array-like, shape = [n_classlabels] Python list or numpy array consisting of class labels. num_labels : int or 'auto' Number of unique labels in the class label array. Infers the number of unique labels from the input array if set to 'auto'. dtype : str NumPy array type (float, float32, float64) of the output array. Returns ary : numpy.ndarray, shape = [n_classlabels] One-hot encoded array, where each sample is represented as a row vector in the returned array. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/one_hot/","title":"One hot"},{"location":"api_modules/mlxtend.preprocessing/one_hot/#one_hot","text":"one_hot(y, num_labels='auto', dtype='float') One-hot encoding of class labels Parameters y : array-like, shape = [n_classlabels] Python list or numpy array consisting of class labels. num_labels : int or 'auto' Number of unique labels in the class label array. Infers the number of unique labels from the input array if set to 'auto'. dtype : str NumPy array type (float, float32, float64) of the output array. Returns ary : numpy.ndarray, shape = [n_classlabels] One-hot encoded array, where each sample is represented as a row vector in the returned array. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/one_hot/","title":"one_hot"},{"location":"api_modules/mlxtend.preprocessing/shuffle_arrays_unison/","text":"shuffle_arrays_unison shuffle_arrays_unison(arrays, random_seed=None) Shuffle NumPy arrays in unison. Parameters arrays : array-like, shape = [n_arrays] A list of NumPy arrays. random_seed : int (default: None) Sets the random state. Returns shuffled_arrays : A list of NumPy arrays after shuffling. Examples >>> import numpy as np >>> from mlxtend.preprocessing import shuffle_arrays_unison >>> X1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> y1 = np.array([1, 2, 3]) >>> X2, y2 = shuffle_arrays_unison(arrays=[X1, y1], random_seed=3) >>> assert(X2.all() == np.array([[4, 5, 6], [1, 2, 3], [7, 8, 9]]).all()) >>> assert(y2.all() == np.array([2, 1, 3]).all()) >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/shuffle_arrays_unison/","title":"Shuffle arrays unison"},{"location":"api_modules/mlxtend.preprocessing/shuffle_arrays_unison/#shuffle_arrays_unison","text":"shuffle_arrays_unison(arrays, random_seed=None) Shuffle NumPy arrays in unison. 
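A minimal sketch of one_hot as documented above:

```python
from mlxtend.preprocessing import one_hot

y = [0, 1, 2, 1]
print(one_hot(y))
# expected: a 4x3 float array with a single 1. per row
```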
Parameters arrays : array-like, shape = [n_arrays] A list of NumPy arrays. random_seed : int (default: None) Sets the random state. Returns shuffled_arrays : A list of NumPy arrays after shuffling. Examples >>> import numpy as np >>> from mlxtend.preprocessing import shuffle_arrays_unison >>> X1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> y1 = np.array([1, 2, 3]) >>> X2, y2 = shuffle_arrays_unison(arrays=[X1, y1], random_seed=3) >>> assert(X2.all() == np.array([[4, 5, 6], [1, 2, 3], [7, 8, 9]]).all()) >>> assert(y2.all() == np.array([2, 1, 3]).all()) >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/shuffle_arrays_unison/\",\"title\":\"shuffle_arrays_unison\"},{\"location\":\"api_modules/mlxtend.preprocessing/standardize/\",\"text\":\"standardize standardize(array, columns=None, ddof=0, return_params=False, params=None) Standardize columns in pandas DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] (default: None) Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] If None, standardizes all columns. ddof : int (default: 0) Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. return_params : bool (default: False) If set to True, a dictionary is returned in addition to the standardized array. The parameter dictionary contains the column means ('avgs') and standard deviations ('stds') of the individual columns. params : dict (default: None) A dictionary with column means and standard deviations as returned by the standardize function if return_params was set to True. If a params dictionary is provided, the standardize function will use these parameters instead of computing them from the current array. Notes If all values in a given column are the same, these values are all set to 0.0. The standard deviation in the parameters dictionary is consequently set to 1.0 to avoid dividing by zero. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with standardized columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/standardize/\",\"title\":\"Standardize\"},{\"location\":\"api_modules/mlxtend.preprocessing/standardize/#standardize\",\"text\":\"standardize(array, columns=None, ddof=0, return_params=False, params=None) Standardize columns in pandas DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] (default: None) Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] If None, standardizes all columns. ddof : int (default: 0) Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. return_params : bool (default: False) If set to True, a dictionary is returned in addition to the standardized array. The parameter dictionary contains the column means ('avgs') and standard deviations ('stds') of the individual columns. params : dict (default: None) A dictionary with column means and standard deviations as returned by the standardize function if return_params was set to True. If a params dictionary is provided, the standardize function will use these parameters instead of computing them from the current array. Notes If all values in a given column are the same, these values are all set to 0.0.
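The return_params/params pair documented above lets you reuse training-set statistics on new data; a hedged sketch:

```python
import pandas as pd
from mlxtend.preprocessing import standardize

df_train = pd.DataFrame({'s1': [1.0, 4.0, 7.0, 10.0], 's2': [10.0, 8.0, 6.0, 4.0]})
df_std, params = standardize(df_train, columns=['s1', 's2'], return_params=True)
# apply the stored means/stds ('avgs', 'stds') to unseen data
df_new = pd.DataFrame({'s1': [2.0, 5.0], 's2': [9.0, 7.0]})
df_new_std = standardize(df_new, columns=['s1', 's2'], params=params)
print(df_new_std)
```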
The standard deviation in the parameters dictionary is consequently set to 1.0 to avoid dividing by zero. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with standardized columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/standardize/\",\"title\":\"standardize\"},{\"location\":\"api_modules/mlxtend.regressor/LinearRegression/\",\"text\":\"LinearRegression LinearRegression(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) Ordinary least squares linear regression. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent learning If 1 < minibatches < len(y): Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch; ignored if solver='normal equation' Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/LinearRegression/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.
Returns self. Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause","title":"LinearRegression"},{"location":"api_modules/mlxtend.regressor/StackingCVRegressor/","text":"StackingCVRegressor StackingCVRegressor(regressors, meta_regressor, cv=5, shuffle=True, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A 'Stacking Cross-Validation' regressor for scikit-learn estimators. New in mlxtend v0.7.0 Notes The StackingCVRegressor uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus, NumPy's random seed needs to be specified explicitly for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVRegressor. Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingCVRegressor will fit clones of these original regressors that will be stored in the class attribute self.regr_. meta_regressor : object The meta-regressor to be fitted on the ensemble of regressors. cv : int, cross-validation generator or iterable, optional (default: 5) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - integer, to specify the number of folds in a KFold, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use KFold cross-validation. use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. shuffle : bool (default: True) If True, and the cv argument is integer, the training data will be shuffled at fitting stage prior to cross-validation. If the cv argument is a specific cross validation technique, this argument is omitted. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor are stored in the self.train_meta_features_ array, which can be accessed after calling fit. refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that support the scikit-learn fit/predict API but are not compatible with scikit-learn's clone function. Attributes train_meta_features : numpy array, shape = [n_samples, n_regressors] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingCVRegressor/ Methods fit(X, y, groups=None, sample_weight=None) Fit ensemble regressors and the meta-regressor.
Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples. For some estimators this may be a precomputed kernel matrix instead, shape = (n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for the estimator. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) wrt. y. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. 
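Rounding off the StackingCVRegressor methods above, a minimal end-to-end sketch that seeds NumPy's RNG as the Notes recommend (the toy data and choice of base regressors are illustrative assumptions, not from the original docs):
>>> import numpy as np
>>> from sklearn.linear_model import Lasso, Ridge
>>> from mlxtend.regressor import StackingCVRegressor
>>> np.random.seed(1)  # check_cv has no seed argument, so fix NumPy's RNG
>>> X = np.random.rand(20, 3)
>>> y = 2.0 * X[:, 0] + np.random.rand(20)
>>> stack = StackingCVRegressor(regressors=[Ridge(), Lasso()],
...                             meta_regressor=Ridge(), cv=5)
>>> stack = stack.fit(X, y)
>>> y_pred = stack.predict(X)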
Returns self","title":"StackingCVRegressor"},{"location":"api_modules/mlxtend.regressor/StackingCVRegressor/#stackingcvregressor","text":"StackingCVRegressor(regressors, meta_regressor, cv=5, shuffle=True, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A 'Stacking Cross-Validation' regressor for scikit-learn estimators. New in mlxtend v0.7.0 Notes The StackingCVRegressor uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus NumPy's random seed need to be specified explicitely for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVRegressor Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingCVRegressor will fit clones of these original regressors that will be stored in the class attribute self.regr_ . meta_regressor : object The meta-regressor to be fitted on the ensemble of regressor cv : int, cross-validation generator or iterable, optional (default: 5) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - integer, to specify the number of folds in a KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use KFold cross-validation use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. shuffle : bool (default: True) If True, and the cv argument is integer, the training data will be shuffled at fitting stage prior to cross-validation. If the cv argument is a specific cross validation technique, this argument is omitted. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor stored in the self.train_meta_features_ array, which can be accessed after calling fit . refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that are supporting the scikit-learn fit/predict API interface but are not compatible to scikit-learn's clone function. Attributes train_meta_features : numpy array, shape = [n_samples, n_regressors] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingCVRegressor/","title":"StackingCVRegressor"},{"location":"api_modules/mlxtend.regressor/StackingCVRegressor/#methods","text":"fit(X, y, groups=None, sample_weight=None) Fit ensemble regressors and the meta-regressor. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. 
This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples. For some estimators this may be a precomputed kernel matrix instead, shape = (n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for the estimator. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) wrt. y. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self","title":"Methods"},{"location":"api_modules/mlxtend.regressor/StackingRegressor/","text":"StackingRegressor StackingRegressor(regressors, meta_regressor, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A Stacking regressor for scikit-learn estimators for regression. Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingRegressor will fit clones of those original regressors that will be stored in the class attribute self.regr_ . 
meta_regressor : object The meta-regressor to be fitted on the ensemble of regressors verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the regressor being fitted - verbose=2 : Prints info about the parameters of the regressor being fitted - verbose>2 : Changes verbose param of the underlying regressor to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor stored in the self.train_meta_features_ array, which can be accessed after calling fit . Attributes regr_ : list, shape=[n_regressors] Fitted regressors (clones of the original regressors) meta_regr_ : estimator Fitted meta-regressor (clone of the original meta-estimator) coef_ : array-like, shape = [n_features] Model coefficients of the fitted meta-estimator intercept_ : float Intercept of the fitted meta-estimator train_meta_features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that are supporting the scikit-learn fit/predict API interface but are not compatible to scikit-learn's clone function. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingRegressor/ Methods fit(X, y, sample_weight=None) Learn weight coefficients from training data for each regressor. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. 
Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples. For some estimators this may be a precomputed kernel matrix instead, shape = (n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for the estimator. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) wrt. y. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self Properties coef_ None intercept_ None","title":"StackingRegressor"},{"location":"api_modules/mlxtend.regressor/StackingRegressor/#stackingregressor","text":"StackingRegressor(regressors, meta_regressor, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A Stacking regressor for scikit-learn estimators for regression. Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingRegressor will fit clones of those original regressors that will be stored in the class attribute self.regr_ . meta_regressor : object The meta-regressor to be fitted on the ensemble of regressors verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the regressor being fitted - verbose=2 : Prints info about the parameters of the regressor being fitted - verbose>2 : Changes verbose param of the underlying regressor to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor stored in the self.train_meta_features_ array, which can be accessed after calling fit . 
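A short usage sketch for the StackingRegressor documented here (the toy data and choice of base regressors are my own, not from the original docs):
>>> import numpy as np
>>> from sklearn.linear_model import Lasso, Ridge
>>> from mlxtend.regressor import StackingRegressor
>>> X = np.arange(10.0).reshape(-1, 1)
>>> y = 3.0 * X.ravel() + 1.0
>>> stregr = StackingRegressor(regressors=[Ridge(), Lasso()],
...                            meta_regressor=Ridge())
>>> stregr = stregr.fit(X, y)
>>> coef, intercept = stregr.coef_, stregr.intercept_  # from the fitted meta-estimator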
Attributes regr_ : list, shape=[n_regressors] Fitted regressors (clones of the original regressors) meta_regr_ : estimator Fitted meta-regressor (clone of the original meta-estimator) coef_ : array-like, shape = [n_features] Model coefficients of the fitted meta-estimator intercept_ : float Intercept of the fitted meta-estimator train_meta_features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that are supporting the scikit-learn fit/predict API interface but are not compatible to scikit-learn's clone function. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingRegressor/","title":"StackingRegressor"},{"location":"api_modules/mlxtend.regressor/StackingRegressor/#methods","text":"fit(X, y, sample_weight=None) Learn weight coefficients from training data for each regressor. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. 
Parameters X : array-like, shape = (n_samples, n_features) Test samples. For some estimators this may be a precomputed kernel matrix instead, shape = (n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for the estimator. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) wrt. y. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self","title":"Methods"},{"location":"api_modules/mlxtend.regressor/StackingRegressor/#properties","text":"coef_ None intercept_ None","title":"Properties"},{"location":"api_modules/mlxtend.text/generalize_names/","text":"generalize_names generalize_names(name, output_sep=' ', firstname_output_letters=1) Generalize a person's first and last name. Returns a person's name in the format (all lowercase) Parameters name : str Name of the player output_sep : str (default: ' ') String for separating last name and first name in the output. firstname_output_letters : int Number of letters in the abbreviated first name. Returns gen_name : str The generalized name. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names/","title":"Generalize names"},{"location":"api_modules/mlxtend.text/generalize_names/#generalize_names","text":"generalize_names(name, output_sep=' ', firstname_output_letters=1) Generalize a person's first and last name. Returns a person's name in the format (all lowercase) Parameters name : str Name of the player output_sep : str (default: ' ') String for separating last name and first name in the output. firstname_output_letters : int Number of letters in the abbreviated first name. Returns gen_name : str The generalized name. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names/","title":"generalize_names"},{"location":"api_modules/mlxtend.text/generalize_names_duplcheck/","text":"generalize_names_duplcheck generalize_names_duplcheck(df, col_name) Generalizes names and removes duplicates. Applies mlxtend.text.generalize_names to a DataFrame with 1 first name letter by default and uses more first name letters if duplicates are detected. Parameters df : pandas.DataFrame DataFrame that contains a column where generalize_names should be applied. col_name : str Name of the DataFrame column where generalize_names function should be applied to. Returns df_new : str New DataFrame object where generalize_names function has been applied without duplicates. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names_duplcheck/","title":"Generalize names duplcheck"},{"location":"api_modules/mlxtend.text/generalize_names_duplcheck/#generalize_names_duplcheck","text":"generalize_names_duplcheck(df, col_name) Generalizes names and removes duplicates. Applies mlxtend.text.generalize_names to a DataFrame with 1 first name letter by default and uses more first name letters if duplicates are detected. Parameters df : pandas.DataFrame DataFrame that contains a column where generalize_names should be applied. col_name : str Name of the DataFrame column where generalize_names function should be applied to. 
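Based on the behavior described above, a sketch of typical generalize_names_duplcheck usage (the sample names, and the exact generalized strings in the comment, are illustrative assumptions):
>>> import pandas as pd
>>> from mlxtend.text import generalize_names_duplcheck
>>> df = pd.DataFrame({'name': ['Smith, John', 'Smith, Jane', 'Doe, Mark']})
>>> df_new = generalize_names_duplcheck(df=df, col_name='name')
>>> # 'smith j' would be ambiguous, so more first-name letters are kept,
>>> # e.g. 'smith jo' and 'smith ja', while 'Doe, Mark' becomes 'doe m'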
Returns df_new : str New DataFrame object where generalize_names function has been applied without duplicates. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names_duplcheck/","title":"generalize_names_duplcheck"},{"location":"api_modules/mlxtend.text/tokenizer_emoticons/","text":"tokenizer_emoticons tokenizer_emoticons(text) Return emoticons from text Examples >>> tokenizer_emoticons('This :) is :( a test :-)!') [':)', ':(', ':-)'] For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_emoticons/","title":"Tokenizer emoticons"},{"location":"api_modules/mlxtend.text/tokenizer_emoticons/#tokenizer_emoticons","text":"tokenizer_emoticons(text) Return emoticons from text Examples >>> tokenizer_emoticons('This :) is :( a test :-)!') [':)', ':(', ':-)'] For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_emoticons/","title":"tokenizer_emoticons"},{"location":"api_modules/mlxtend.text/tokenizer_words_and_emoticons/","text":"tokenizer_words_and_emoticons tokenizer_words_and_emoticons(text) Convert text to lowercase words and emoticons. Examples >>> tokenizer_words_and_emoticons('This :) is :( a test :-)!') ['this', 'is', 'a', 'test', ':)', ':(', ':-)'] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_words_and_emoticons/","title":"Tokenizer words and emoticons"},{"location":"api_modules/mlxtend.text/tokenizer_words_and_emoticons/#tokenizer_words_and_emoticons","text":"tokenizer_words_and_emoticons(text) Convert text to lowercase words and emoticons. Examples >>> tokenizer_words_and_emoticons('This :) is :( a test :-)!') ['this', 'is', 'a', 'test', ':)', ':(', ':-)'] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_words_and_emoticons/","title":"tokenizer_words_and_emoticons"},{"location":"api_modules/mlxtend.utils/Counter/","text":"Counter Counter(stderr=False, start_newline=True, precision=0, name=None) Class to display the progress of for-loop iterators. Parameters stderr : bool (default: True) Prints output to sys.stderr if True; uses sys.stdout otherwise. start_newline : bool (default: True) Prepends a new line to the counter, which prevents overwriting counters if multiple counters are printed in succession. precision: int (default: 0) Sets the number of decimal places when displaying the time elapsed in seconds. name : string (default: None) Prepends the specified name before the counter to allow distinguishing between multiple counters. Attributes curr_iter : int The current iteration. start_time : float The system's time in seconds when the Counter was initialized. end_time : float The system's time in seconds when the Counter was last updated. Examples >>> cnt = Counter() >>> for i in range(20): ... # do some computation ... time.sleep(0.1) ... cnt.update() 20 iter | 2 sec >>> print('The counter was initialized.' ' %d seconds ago.' % (time.time() - cnt.start_time)) The counter was initialized 2 seconds ago >>> print('The counter was last updated' ' %d seconds ago.' % (time.time() - cnt.end_time)) The counter was last updated 0 seconds ago. 
For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/utils/Counter/ Methods update() Print current iteration and time elapsed.","title":"Counter"},{"location":"api_modules/mlxtend.utils/Counter/#counter","text":"Counter(stderr=False, start_newline=True, precision=0, name=None) Class to display the progress of for-loop iterators. Parameters stderr : bool (default: True) Prints output to sys.stderr if True; uses sys.stdout otherwise. start_newline : bool (default: True) Prepends a new line to the counter, which prevents overwriting counters if multiple counters are printed in succession. precision: int (default: 0) Sets the number of decimal places when displaying the time elapsed in seconds. name : string (default: None) Prepends the specified name before the counter to allow distinguishing between multiple counters. Attributes curr_iter : int The current iteration. start_time : float The system's time in seconds when the Counter was initialized. end_time : float The system's time in seconds when the Counter was last updated. Examples >>> cnt = Counter() >>> for i in range(20): ... # do some computation ... time.sleep(0.1) ... cnt.update() 20 iter | 2 sec >>> print('The counter was initialized.' ' %d seconds ago.' % (time.time() - cnt.start_time)) The counter was initialized 2 seconds ago >>> print('The counter was last updated' ' %d seconds ago.' % (time.time() - cnt.end_time)) The counter was last updated 0 seconds ago. For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/utils/Counter/","title":"Counter"},{"location":"api_modules/mlxtend.utils/Counter/#methods","text":"update() Print current iteration and time elapsed.","title":"Methods"},{"location":"api_modules/mlxtend.utils/assert_raises/","text":"assert_raises assert_raises(exception_type, message, func, args, * kwargs) Check that an exception is raised with a specific message Parameters exception_type : exception The exception that should be raised message : str (default: None) The error message that should be raised. Ignored if False or None. func : callable The function that raises the exception *args : positional arguments to func . **kwargs : keyword arguments to func","title":"Assert raises"},{"location":"api_modules/mlxtend.utils/assert_raises/#assert_raises","text":"assert_raises(exception_type, message, func, args, * kwargs) Check that an exception is raised with a specific message Parameters exception_type : exception The exception that should be raised message : str (default: None) The error message that should be raised. Ignored if False or None. func : callable The function that raises the exception *args : positional arguments to func . **kwargs : keyword arguments to func","title":"assert_raises"},{"location":"api_modules/mlxtend.utils/check_Xy/","text":"check_Xy check_Xy(X, y, y_int=True) None","title":"check Xy"},{"location":"api_modules/mlxtend.utils/check_Xy/#check_xy","text":"check_Xy(X, y, y_int=True) None","title":"check_Xy"},{"location":"api_modules/mlxtend.utils/format_kwarg_dictionaries/","text":"format_kwarg_dictionaries format_kwarg_dictionaries(default_kwargs=None, user_kwargs=None, protected_keys=None) Function to combine default and user specified kwargs dictionaries Parameters default_kwargs : dict, optional Default kwargs (default is None). user_kwargs : dict, optional User specified kwargs (default is None). protected_keys : array_like, optional Sequence of keys to be removed from the returned dictionary (default is None). 
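A sketch of how the described merge presumably behaves (the precedence stated in the comments is my reading of the docstring, not verified output):
>>> from mlxtend.utils import format_kwarg_dictionaries
>>> defaults = {'alpha': 0.5, 'linewidth': 1}
>>> user = {'linewidth': 3}
>>> combined = format_kwarg_dictionaries(default_kwargs=defaults,
...                                      user_kwargs=user,
...                                      protected_keys=['alpha'])
>>> # user values should override defaults, and 'alpha' is removed,
>>> # so combined would be something like {'linewidth': 3}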
Returns formatted_kwargs : dict Formatted kwargs dictionary.","title":"Format kwarg dictionaries"},{"location":"api_modules/mlxtend.utils/format_kwarg_dictionaries/#format_kwarg_dictionaries","text":"format_kwarg_dictionaries(default_kwargs=None, user_kwargs=None, protected_keys=None) Function to combine default and user specified kwargs dictionaries Parameters default_kwargs : dict, optional Default kwargs (default is None). user_kwargs : dict, optional User specified kwargs (default is None). protected_keys : array_like, optional Sequence of keys to be removed from the returned dictionary (default is None). Returns formatted_kwargs : dict Formatted kwargs dictionary.","title":"format_kwarg_dictionaries"},{"location":"api_subpackages/mlxtend._base/","text":"mlxtend version: 0.14.0dev","title":"Mlxtend. base"},{"location":"api_subpackages/mlxtend.classifier/","text":"mlxtend version: 0.14.0dev Adaline Adaline(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) ADAptive LInear NEuron classifier. Note that this implementation of Adaline expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) solver rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Adaline/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. 
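A minimal fitting sketch for Adaline with binary labels in {0, 1} (the toy data are my own, not from the original docs):
>>> import numpy as np
>>> from mlxtend.classifier import Adaline
>>> X = np.array([[1.0, 2.0], [2.0, 3.0], [6.0, 8.0], [7.0, 9.0]])
>>> y = np.array([0, 0, 1, 1])  # binary class labels in {0, 1}
>>> ada = Adaline(eta=0.01, epochs=30, minibatches=None, random_seed=1)  # closed-form fit
>>> ada = ada.fit(X, y)
>>> acc = ada.score(X, y)  # fraction of correctly classified samples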
score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self. Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause EnsembleVoteClassifier EnsembleVoteClassifier(clfs, voting='hard', weights=None, verbose=0, refit=True) Soft Voting/Majority Rule classifier for scikit-learn estimators. Parameters clfs : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the EnsembleVoteClassifier will fit clones of those original classifiers that will be stored in the class attribute self.clfs_ if refit=True (default). voting : str, {'hard', 'soft'} (default='hard') If 'hard', uses predicted class labels for majority rule voting. Else if 'soft', predicts the class label based on the argmax of the sums of the predicted probabilities, which is recommended for an ensemble of well-calibrated classifiers. weights : array-like, shape = [n_classifiers], optional (default=None) Sequence of weights (float or int) to weight the occurrences of predicted class labels (hard voting) or class probabilities before averaging (soft voting). Uses uniform weights if None. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the clf being fitted - verbose=2 : Prints info about the parameters of the clf being fitted - verbose>2 : Changes verbose param of the underlying clf to self.verbose - 2 refit : bool (default: True) Refits classifiers in clfs if True; uses references to the clfs otherwise (assumes that the classifiers were already fit). Note: refit=False is incompatible with most scikit-learn wrappers! For instance, if any form of cross-validation is performed, this would require re-fitting the classifiers on the training folds, which would raise a NotFittedError if refit=False. (New in mlxtend v0.6.) Attributes classes_ : array-like, shape = [n_predictions] clf : array-like, shape = [n_predictions] The unmodified input classifiers clf_ : array-like, shape = [n_predictions] Fitted clones of the input classifiers Examples >>> import numpy as np >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.ensemble import RandomForestClassifier >>> from mlxtend.classifier import EnsembleVoteClassifier >>> clf1 = LogisticRegression(random_state=1) >>> clf2 = RandomForestClassifier(random_state=1) >>> clf3 = GaussianNB() >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> eclf1 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ...
voting='hard', verbose=1) >>> eclf1 = eclf1.fit(X, y) >>> print(eclf1.predict(X)) [1 1 1 2 2 2] >>> eclf2 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft') >>> eclf2 = eclf2.fit(X, y) >>> print(eclf2.predict(X)) [1 1 1 2 2 2] >>> eclf3 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='soft', weights=[2,1,1]) >>> eclf3 = eclf3.fit(X, y) >>> print(eclf3.predict(X)) [1 1 1 2 2 2] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/EnsembleVoteClassifier/ Methods fit(X, y, sample_weight=None) Learn weight coefficients from training data for each classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict class labels for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns maj : array-like, shape = [n_samples] Predicted class labels. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns avg : array-like, shape = [n_samples, n_classes] Weighted average probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X) Return class labels or probabilities for X for each estimator. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns If voting='soft'`` : array-like = [n_classifiers, n_samples, n_classes] Class probabilties calculated by each classifier. If voting='hard'`` : array-like = [n_classifiers, n_samples] Class labels predicted by each classifier. 
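Complementing the examples above, a sketch of the refit=False path for classifiers that have already been fitted (the estimator choices and toy data are assumptions, not from the original docs):
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from mlxtend.classifier import EnsembleVoteClassifier
>>> X = np.array([[-2.0, -1.0], [-1.0, -1.0], [1.0, 1.0], [2.0, 1.0]])
>>> y = np.array([0, 0, 1, 1])
>>> clf1 = LogisticRegression().fit(X, y)
>>> clf2 = GaussianNB().fit(X, y)
>>> eclf = EnsembleVoteClassifier(clfs=[clf1, clf2], voting='soft', refit=False)
>>> eclf = eclf.fit(X, y)  # with refit=False the pre-fitted clfs are used as-is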
LogisticRegression LogisticRegression(eta=0.01, epochs=50, l2_lambda=0.0, minibatches=1, random_seed=None, print_progress=0) Logistic regression classifier. Note that this implementation of Logistic Regression expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2_lambda : float Regularization parameter for L2 regularization. No regularization if l2_lambda=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats with cross_entropy cost (sgd or gd) for every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/LogisticRegression/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class 1 probability : float score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). 
The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause MultiLayerPerceptron MultiLayerPerceptron(eta=0.5, epochs=50, hidden_layers=[50], n_classes=None, momentum=0.0, l1=0.0, l2=0.0, dropout=1.0, decrease_const=0.0, minibatches=1, random_seed=None, print_progress=0) Multi-layer perceptron classifier with logistic sigmoid activations Parameters eta : float (default: 0.5) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. hidden_layers : list (default: [50]) Number of units per hidden layer. By default 50 units in the first hidden layer. At the moment only 1 hidden layer is supported n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. l1 : float (default: 0.0) L1 regularization strength l2 : float (default: 0.0) L2 regularization strength momentum : float (default: 0.0) Momentum constant. Factor multiplied with the gradient of the previous epoch t-1 to improve learning speed w(t) := w(t) - (grad(t) + momentum * grad(t-1)) decrease_const : float (default: 0.0) Decrease constant. Shrinks the learning rate after each epoch via eta / (1 + epoch*decrease_const) minibatches : int (default: 1) Divide the training data into k minibatches for accelerated stochastic gradient descent learning. Gradient Descent Learning if minibatches = 1 Stochastic Gradient Descent learning if minibatches = len(y) Minibatch learning if minibatches > 1 random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape=[n_features, n_classes] Weights after fitting. b_ : 1D-array, shape=[n_classes] Bias units after fitting. cost_ : list List of floats; the mean categorical cross entropy cost after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/MultiLayerPerceptron/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. 
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilties : array-like, shape= [n_samples, n_classes] score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause Perceptron Perceptron(eta=0.1, epochs=50, random_seed=None, print_progress=0) Perceptron classifier. Note that this implementation of the Perceptron expects binary class labels in {0, 1}. Parameters eta : float (default: 0.1) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Number of passes over the training dataset. Prior to each epoch, the dataset is shuffled to prevent cycles. random_seed : int Random state for initializing random weights and shuffling. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Number of misclassifications in every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Perceptron/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. 
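A minimal fitting sketch for the Perceptron documented above (the toy data are my own, not from the original docs):
>>> import numpy as np
>>> from mlxtend.classifier import Perceptron
>>> X = np.array([[0.0, 0.0], [1.0, 0.0], [5.0, 6.0], [6.0, 7.0]])
>>> y = np.array([0, 0, 1, 1])  # binary class labels in {0, 1}
>>> ppn = Perceptron(eta=0.1, epochs=40, random_seed=1)
>>> ppn = ppn.fit(X, y)
>>> acc = ppn.score(X, y)  # should reach 1.0 on this linearly separable toy set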
SoftmaxRegression SoftmaxRegression(eta=0.01, epochs=50, l2=0.0, minibatches=1, n_classes=None, random_seed=None, print_progress=0) Softmax regression classifier. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2 : float Regularization parameter for L2 regularization. No regularization if l2=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats, the average cross_entropy for each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/SoftmaxRegression/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilities : array-like, shape= [n_samples, n_classes] score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause
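A quick illustrative sketch of the SoftmaxRegression API above (assumes iris_data from mlxtend.data and arbitrary hyperparameters; minibatches=1 selects plain gradient descent per the parameter description):
>>> from mlxtend.data import iris_data
>>> from mlxtend.classifier import SoftmaxRegression
>>> X, y = iris_data()
>>> X_std = (X - X.mean(axis=0)) / X.std(axis=0)  # standardize features
>>> smr = SoftmaxRegression(eta=0.01, epochs=200, minibatches=1, random_seed=1)
>>> smr = smr.fit(X_std, y)
>>> proba = smr.predict_proba(X_std)  # shape [n_samples, n_classes]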
StackingCVClassifier StackingCVClassifier(classifiers, meta_classifier, use_probas=False, cv=2, use_features_in_secondary=False, stratify=True, shuffle=True, verbose=0, store_train_meta_features=False, use_clones=True) A 'Stacking Cross-Validation' classifier for scikit-learn estimators. New in mlxtend v0.4.3 Notes The StackingCVClassifier uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus, NumPy's random seed needs to be specified explicitly for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVClassifier. Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the StackingCVClassifier will fit clones of these original classifiers that will be stored in the class attribute self.clfs_ . meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains meta-classifier based on predicted probabilities instead of class labels. cv : int, cross-validation generator or an iterable, optional (default: 2) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 2-fold cross validation, - integer, to specify the number of folds in a (Stratified)KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use either a KFold or StratifiedKFold cross validation depending on the value of the stratify argument. use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. stratify : bool (default: True) If True and the cv argument is an integer, it will follow a stratified K-Fold cross validation technique. If the cv argument is a specific cross validation technique, this argument is ignored. shuffle : bool (default: True) If True and the cv argument is an integer, the training data will be shuffled at fitting stage prior to cross-validation. If the cv argument is a specific cross validation technique, this argument is ignored. verbose : int, optional (default=0) Controls the verbosity of the building process.
- verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the classifier being fitted and which fold is currently being used for fitting - verbose=2 : Prints info about the parameters of the classifier being fitted - verbose>2 : Changes verbose param of the underlying classifier to self.verbose - 2 store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier are stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingCVClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classifiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingCVClassifier/ Methods fit(X, y, groups=None, sample_weight=None) Fit ensemble classifiers and the meta-classifier. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each classifier in the classifiers list as well as the meta_classifier. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] Predicted class labels. predict_meta_features(X) Get meta-features of test data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X.
Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self
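A minimal sketch of the StackingCVClassifier API above (illustrative only; base and meta estimators are arbitrary scikit-learn choices, and the data comes from mlxtend.data's iris_data; note the explicit NumPy seed, per the Notes above):
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.neighbors import KNeighborsClassifier
>>> from mlxtend.classifier import StackingCVClassifier
>>> from mlxtend.data import iris_data
>>> X, y = iris_data()
>>> np.random.seed(1)  # check_cv has no seed argument (see Notes)
>>> sclf = StackingCVClassifier(classifiers=[KNeighborsClassifier(), GaussianNB()],
...                             meta_classifier=LogisticRegression(), cv=2)
>>> sclf = sclf.fit(X, y)
>>> y_pred = sclf.predict(X)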
StackingClassifier StackingClassifier(classifiers, meta_classifier, use_probas=False, average_probas=False, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, use_clones=True) A Stacking classifier for scikit-learn estimators for classification. Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the StackingClassifier will fit clones of these original classifiers that will be stored in the class attribute self.clfs_ . meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains meta-classifier based on predicted probabilities instead of class labels. average_probas : bool (default: False) Averages the probabilities as meta features if True. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the classifier being fitted - verbose=2 : Prints info about the parameters of the classifier being fitted - verbose>2 : Changes verbose param of the underlying classifier to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier are stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classifiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingClassifier/ Methods fit(X, y, sample_weight=None) Fit ensemble classifiers and the meta-classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_outputs] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each classifier in the classifiers list as well as the meta_classifier. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] or [n_samples, n_outputs] Predicted class labels. predict_meta_features(X) Get meta-features of test data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] or a list of n_outputs of such arrays if n_outputs > 1. Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self
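A minimal sketch of the StackingClassifier API above (illustrative only; estimator choices are arbitrary and the data comes from mlxtend.data's iris_data; use_probas=True stacks class probabilities rather than labels, as described above):
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.neighbors import KNeighborsClassifier
>>> from mlxtend.classifier import StackingClassifier
>>> from mlxtend.data import iris_data
>>> X, y = iris_data()
>>> sclf = StackingClassifier(classifiers=[KNeighborsClassifier(), GaussianNB()],
...                           meta_classifier=LogisticRegression(),
...                           use_probas=True, average_probas=False)
>>> sclf = sclf.fit(X, y)
>>> meta = sclf.predict_meta_features(X)  # per-classifier meta-features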
","title":"Mlxtend.classifier"},{"location":"api_subpackages/mlxtend.classifier/#adaline","text":"Adaline(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) ADAptive LInear NEuron classifier. Note that this implementation of Adaline expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr, unless the normal equation (closed-form) solver is used. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Adaline/","title":"Adaline"},{"location":"api_subpackages/mlxtend.classifier/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_subpackages/mlxtend.classifier/#author-gael-varoquaux","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.classifier/#license-bsd-3-clause","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"}
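A minimal sketch of the Adaline API documented in the entry above (illustrative only; assumes iris_data from mlxtend.data and keeps two classes, since Adaline expects binary labels in {0, 1}):
>>> from mlxtend.data import iris_data
>>> from mlxtend.classifier import Adaline
>>> X, y = iris_data()
>>> X, y = X[y != 2], y[y != 2]  # binary labels in {0, 1}
>>> ada = Adaline(eta=0.01, epochs=30, minibatches=None, random_seed=1)  # None: normal equations
>>> ada = ada.fit(X, y)
>>> acc = ada.score(X, y)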
,{"location":"api_subpackages/mlxtend.classifier/#author-gael-varoquaux_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.classifier/#license-bsd-3-clause_1","text":"","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.classifier/#ensemblevoteclassifier","text":"EnsembleVoteClassifier(clfs, voting='hard', weights=None, verbose=0, refit=True) Soft Voting/Majority Rule classifier for scikit-learn estimators. Parameters clfs : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the VotingClassifier will fit clones of those original classifiers that will be stored in the class attribute self.clfs_ if refit=True (default). voting : str, {'hard', 'soft'} (default='hard') If 'hard', uses predicted class labels for majority rule voting. Else if 'soft', predicts the class label based on the argmax of the sums of the predicted probabilities, which is recommended for an ensemble of well-calibrated classifiers. weights : array-like, shape = [n_classifiers], optional (default= None ) Sequence of weights ( float or int ) to weight the occurrences of predicted class labels ( hard voting) or class probabilities before averaging ( soft voting). Uses uniform weights if None . verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the clf being fitted - verbose=2 : Prints info about the parameters of the clf being fitted - verbose>2 : Changes verbose param of the underlying clf to self.verbose - 2 refit : bool (default: True) Refits classifiers in clfs if True; uses references to the clfs , otherwise (assumes that the classifiers were already fit). Note: refit=False is incompatible with most scikit-learn wrappers! For instance, if any form of cross-validation is performed, this would require re-fitting the classifiers on the training folds, which would raise a NotFittedError if refit=False. (New in mlxtend v0.6.) Attributes classes_ : array-like, shape = [n_predictions] clf : array-like, shape = [n_predictions] The unmodified input classifiers clf_ : array-like, shape = [n_predictions] Fitted clones of the input classifiers Examples >>> import numpy as np >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.ensemble import RandomForestClassifier >>> from mlxtend.classifier import EnsembleVoteClassifier >>> clf1 = LogisticRegression(random_state=1) >>> clf2 = RandomForestClassifier(random_state=1) >>> clf3 = GaussianNB() >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> eclf1 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ...
voting='hard', verbose=1) >>> eclf1 = eclf1.fit(X, y) >>> print(eclf1.predict(X)) [1 1 1 2 2 2] >>> eclf2 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft') >>> eclf2 = eclf2.fit(X, y) >>> print(eclf2.predict(X)) [1 1 1 2 2 2] >>> eclf3 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='soft', weights=[2,1,1]) >>> eclf3 = eclf3.fit(X, y) >>> print(eclf3.predict(X)) [1 1 1 2 2 2] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/EnsembleVoteClassifier/","title":"EnsembleVoteClassifier"},{"location":"api_subpackages/mlxtend.classifier/#methods_1","text":"fit(X, y, sample_weight=None) Learn weight coefficients from training data for each classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each classifier in the clfs list. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict class labels for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns maj : array-like, shape = [n_samples] Predicted class labels. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns avg : array-like, shape = [n_samples, n_classes] Weighted average probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Return class labels or probabilities for X for each estimator. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns If voting='soft' : array-like = [n_classifiers, n_samples, n_classes] Class probabilities calculated by each classifier.
If voting='hard' : array-like = [n_classifiers, n_samples] Class labels predicted by each classifier.","title":"Methods"},{"location":"api_subpackages/mlxtend.classifier/#logisticregression","text":"LogisticRegression(eta=0.01, epochs=50, l2_lambda=0.0, minibatches=1, random_seed=None, print_progress=0) Logistic regression classifier. Note that this implementation of Logistic Regression expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2_lambda : float Regularization parameter for L2 regularization. No regularization if l2_lambda=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats with cross_entropy cost (sgd or gd) for every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/LogisticRegression/","title":"LogisticRegression"},{"location":"api_subpackages/mlxtend.classifier/#methods_2","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_subpackages/mlxtend.classifier/#author-gael-varoquaux_2","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.classifier/#license-bsd-3-clause_2","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features.
Returns Class 1 probability : float score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score). set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.classifier/#author-gael-varoquaux_3","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.classifier/#license-bsd-3-clause_3","text":"","title":"License: BSD 3 clause"}
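A minimal sketch of the LogisticRegression API from the entries above (illustrative only; assumes iris_data from mlxtend.data and keeps two classes, since this implementation expects binary labels in {0, 1}):
>>> from mlxtend.data import iris_data
>>> from mlxtend.classifier import LogisticRegression
>>> X, y = iris_data()
>>> X, y = X[y != 2], y[y != 2]  # binary labels in {0, 1}
>>> lr = LogisticRegression(eta=0.01, epochs=100, l2_lambda=0.0,
...                         minibatches=1, random_seed=1)  # plain gradient descent
>>> lr = lr.fit(X, y)
>>> proba = lr.predict_proba(X)  # probability of class 1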
","title":"Methods"},{"location":"api_subpackages/mlxtend.cluster/","text":"mlxtend version: 0.14.0dev Kmeans Kmeans(k, max_iter=10, convergence_tolerance=1e-05, random_seed=None, print_progress=0) K-means clustering class. Added in 0.4.1dev Parameters k : int Number of clusters max_iter : int (default: 10) Number of iterations during cluster assignment. Cluster re-assignment stops automatically when the algorithm has converged. convergence_tolerance : float (default: 1e-05) Compares current centroids with centroids of the previous iteration using the given tolerance (a small positive float) to determine if the algorithm converged early. random_seed : int (default: None) Set random state for the initial centroid assignment. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Iterations elapsed 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes centroids_ : 2d-array, shape={k, n_features} Feature values of the k cluster centroids. clusters_ : dictionary The cluster assignments stored as a Python dictionary; the dictionary keys denote the cluster indices and the items are Python lists of the sample indices that were assigned to each cluster. iterations_ : int Number of iterations until convergence. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Kmeans/ Methods fit(X, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self. Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause
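Example (a minimal usage sketch of the Kmeans estimator described above; the three_blobs_data toy dataset and the hyperparameter values are chosen purely for illustration):

```python
from mlxtend.cluster import Kmeans
from mlxtend.data import three_blobs_data

X, y = three_blobs_data()

km = Kmeans(k=3, max_iter=50, random_seed=1, print_progress=0)
km.fit(X)

print(km.centroids_)       # feature values of the k=3 cluster centroids
print(km.iterations_)      # number of iterations until convergence
print(km.predict(X)[:10])  # cluster index assigned to each sample
```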
","title":"Mlxtend.cluster"},{"location":"api_subpackages/mlxtend.cluster/#kmeans","text":"Kmeans(k, max_iter=10, convergence_tolerance=1e-05, random_seed=None, print_progress=0) K-means clustering class. Added in 0.4.1dev Parameters k : int Number of clusters max_iter : int (default: 10) Number of iterations during cluster assignment. Cluster re-assignment stops automatically when the algorithm has converged. convergence_tolerance : float (default: 1e-05) Compares current centroids with centroids of the previous iteration using the given tolerance (a small positive float) to determine if the algorithm converged early. random_seed : int (default: None) Set random state for the initial centroid assignment. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Iterations elapsed 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes centroids_ : 2d-array, shape={k, n_features} Feature values of the k cluster centroids. clusters_ : dictionary The cluster assignments stored as a Python dictionary; the dictionary keys denote the cluster indices and the items are Python lists of the sample indices that were assigned to each cluster. iterations_ : int Number of iterations until convergence. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Kmeans/","title":"Kmeans"},{"location":"api_subpackages/mlxtend.cluster/#methods","text":"fit(X, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_subpackages/mlxtend.cluster/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.cluster/#license-bsd-3-clause","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.
Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.cluster/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.cluster/#license-bsd-3-clause_1","text":"","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.data/","text":"mlxtend version: 0.14.0dev autompg_data autompg_data() Auto MPG dataset. Source : https://archive.ics.uci.edu/ml/datasets/Auto+MPG Number of samples : 392 Continuous target variable : mpg Dataset Attributes: 1) cylinders: multi-valued discrete 2) displacement: continuous 3) horsepower: continuous 4) weight: continuous 5) acceleration: continuous 6) model year: multi-valued discrete 7) origin: multi-valued discrete 8) car name: string (unique for each instance) Returns X, y : [n_samples, n_features], [n_targets] X is the feature matrix with 392 auto samples as rows and 8 feature columns (6 rows with NaNs removed). y is a 1-dimensional array of the target MPG values. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/autompg_data/ boston_housing_data boston_housing_data() Boston Housing dataset. Source : https://archive.ics.uci.edu/ml/datasets/Housing Number of samples : 506 Continuous target variable : MEDV MEDV = Median value of owner-occupied homes in $1000's Dataset Attributes: 1) CRIM per capita crime rate by town 2) ZN proportion of residential land zoned for lots over 25,000 sq.ft. 3) INDUS proportion of non-retail business acres per town 4) CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) 5) NOX nitric oxides concentration (parts per 10 million) 6) RM average number of rooms per dwelling 7) AGE proportion of owner-occupied units built prior to 1940 8) DIS weighted distances to five Boston employment centres 9) RAD index of accessibility to radial highways 10) TAX full-value property-tax rate per $10,000 11) PTRATIO pupil-teacher ratio by town 12) B 1000(Bk - 0.63)^2 where Bk is the prop. of b. by town 13) LSTAT % lower status of the population Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 506 housing samples as rows and 13 feature columns. y is a 1-dimensional array of the continuous target variable MEDV Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/boston_housing_data/ iris_data iris_data() Iris flower dataset. Source : https://archive.ics.uci.edu/ml/datasets/Iris Number of samples : 150 Class labels : {0, 1, 2}, distribution: [50, 50, 50] 0 = setosa, 1 = versicolor, 2 = virginica. Dataset Attributes: 1) sepal length [cm] 2) sepal width [cm] 3) petal length [cm] 4) petal width [cm] Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 150 flower samples as rows, and 4 feature columns sepal length, sepal width, petal length, and petal width. y is a 1-dimensional array of the class labels {0, 1, 2} Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/iris_data/ loadlocal_mnist loadlocal_mnist(images_path, labels_path) Read MNIST from ubyte files. 
Parameters images_path : str path to the test or train MNIST ubyte file labels_path : str path to the test or train MNIST class labels file Returns images : [n_samples, n_pixels] numpy.array Pixel values of the images. labels : [n_samples] numpy array Target class labels Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/loadlocal_mnist/ make_multiplexer_dataset make_multiplexer_dataset(address_bits=2, sample_size=100, positive_class_ratio=0.5, shuffle=False, random_seed=None) Function to create a binary n-bit multiplexer dataset. New in mlxtend v0.9 Parameters address_bits : int (default: 2) A positive integer that determines the number of address bits in the multiplexer, which in turn determines the n-bit capacity of the multiplexer and therefore the number of features. The number of features is determined by the number of address bits. For example, 2 address bits will result in a 6 bit multiplexer and consequently 6 features (2 + 2^2 = 6). If address_bits=3 , then this results in an 11-bit multiplexer (2 + 2^3 = 11) with 11 features. sample_size : int (default: 100) The total number of samples generated. positive_class_ratio : float (default: 0.5) The fraction (a float between 0 and 1) of samples in the generated dataset of size sample_size that have class label 1. If positive_class_ratio=0.5 (default), then the ratio of class 0 and class 1 samples is perfectly balanced. shuffle : bool (default: False) Whether or not to shuffle the features and labels. If False (default), the samples are returned in sorted order starting with sample_size /2 samples with class label 0 and followed by sample_size /2 samples with class label 1. random_seed : int (default: None) Random seed used for generating the multiplexer samples and shuffling. Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with the number of samples equal to sample_size . The number of features is determined by the number of address bits. For instance, 2 address bits will result in a 6 bit multiplexer and consequently 6 features (2 + 2^2 = 6). All features are binary (values in {0, 1}). y is a 1-dimensional array of class labels in {0, 1}. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/make_multiplexer_dataset mnist_data mnist_data() 5000 samples from the MNIST handwritten digits dataset. Data Source : http://yann.lecun.com/exdb/mnist/ Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 5000 image samples as rows; each row consists of 28x28 pixels that were unrolled into 784 pixel feature vectors. y contains the 10 unique class labels 0-9. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/mnist_data/ three_blobs_data three_blobs_data() A random dataset of 3 2D blobs for clustering. Number of samples : 150 Suggested labels : {0, 1, 2}, distribution: [50, 50, 50] Returns X, y : [n_samples, n_features], [n_cluster_labels] X is the feature matrix with 150 samples as rows and 2 feature columns. y is a 1-dimensional array of the 3 suggested cluster labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/three_blobs_data wine_data wine_data() Wine dataset.
Source : https://archive.ics.uci.edu/ml/datasets/Wine Number of samples : 178 Class labels : {0, 1, 2}, distribution: [59, 71, 48] Dataset Attributes: 1) Alcohol 2) Malic acid 3) Ash 4) Alcalinity of ash 5) Magnesium 6) Total phenols 7) Flavanoids 8) Nonflavanoid phenols 9) Proanthocyanins 10) Color intensity 11) Hue 12) OD280/OD315 of diluted wines 13) Proline Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 178 wine samples as rows and 13 feature columns. y is a 1-dimensional array of the 3 class labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/wine_data","title":"Mlxtend.data"},{"location":"api_subpackages/mlxtend.data/#autompg_data","text":"autompg_data() Auto MPG dataset. Source : https://archive.ics.uci.edu/ml/datasets/Auto+MPG Number of samples : 392 Continuous target variable : mpg Dataset Attributes: 1) cylinders: multi-valued discrete 2) displacement: continuous 3) horsepower: continuous 4) weight: continuous 5) acceleration: continuous 6) model year: multi-valued discrete 7) origin: multi-valued discrete 8) car name: string (unique for each instance) Returns X, y : [n_samples, n_features], [n_targets] X is the feature matrix with 392 auto samples as rows and 8 feature columns (6 rows with NaNs removed). y is a 1-dimensional array of the target MPG values. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/autompg_data/","title":"autompg_data"},{"location":"api_subpackages/mlxtend.data/#boston_housing_data","text":"boston_housing_data() Boston Housing dataset. Source : https://archive.ics.uci.edu/ml/datasets/Housing Number of samples : 506 Continuous target variable : MEDV MEDV = Median value of owner-occupied homes in $1000's Dataset Attributes: 1) CRIM per capita crime rate by town 2) ZN proportion of residential land zoned for lots over 25,000 sq.ft. 3) INDUS proportion of non-retail business acres per town 4) CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) 5) NOX nitric oxides concentration (parts per 10 million) 6) RM average number of rooms per dwelling 7) AGE proportion of owner-occupied units built prior to 1940 8) DIS weighted distances to five Boston employment centres 9) RAD index of accessibility to radial highways 10) TAX full-value property-tax rate per $10,000 11) PTRATIO pupil-teacher ratio by town 12) B 1000(Bk - 0.63)^2 where Bk is the prop. of b. by town 13) LSTAT % lower status of the population Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 506 housing samples as rows and 13 feature columns. y is a 1-dimensional array of the continuous target variable MEDV Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/boston_housing_data/","title":"boston_housing_data"},{"location":"api_subpackages/mlxtend.data/#iris_data","text":"iris_data() Iris flower dataset. Source : https://archive.ics.uci.edu/ml/datasets/Iris Number of samples : 150 Class labels : {0, 1, 2}, distribution: [50, 50, 50] 0 = setosa, 1 = versicolor, 2 = virginica. Dataset Attributes: 1) sepal length [cm] 2) sepal width [cm] 3) petal length [cm] 4) petal width [cm] Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 150 flower samples as rows, and 4 feature columns sepal length, sepal width, petal length, and petal width. 
y is a 1-dimensional array of the class labels {0, 1, 2} Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/iris_data/","title":"iris_data"},{"location":"api_subpackages/mlxtend.data/#loadlocal_mnist","text":"loadlocal_mnist(images_path, labels_path) Read MNIST from ubyte files. Parameters images_path : str path to the test or train MNIST ubyte file labels_path : str path to the test or train MNIST class labels file Returns images : [n_samples, n_pixels] numpy.array Pixel values of the images. labels : [n_samples] numpy array Target class labels Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/loadlocal_mnist/","title":"loadlocal_mnist"},{"location":"api_subpackages/mlxtend.data/#make_multiplexer_dataset","text":"make_multiplexer_dataset(address_bits=2, sample_size=100, positive_class_ratio=0.5, shuffle=False, random_seed=None) Function to create a binary n-bit multiplexer dataset. New in mlxtend v0.9 Parameters address_bits : int (default: 2) A positive integer that determines the number of address bits in the multiplexer, which in turn determine the n-bit capacity of the multiplexer and therefore the number of features. The number of features is determined by the number of address bits. For example, 2 address bits will result in a 6 bit multiplexer and consequently 6 features (2 + 2^2 = 6). If address_bits=3 , then this results in an 11-bit multiplexer as (2 + 2^3 = 11) with 11 features. sample_size : int (default: 100) The total number of samples generated. positive_class_ratio : float (default: 0.5) The fraction (a float between 0 and 1) of samples in the sample_size d dataset that have class label 1. If positive_class_ratio=0.5 (default), then the ratio of class 0 and class 1 samples is perfectly balanced. shuffle : Bool (default: False) Whether or not to shuffle the features and labels. If False (default), the samples are returned in sorted order starting with sample_size /2 samples with class label 0 and followed by sample_size /2 samples with class label 1. random_seed : int (default: None) Random seed used for generating the multiplexer samples and shuffling. Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with the number of samples equal to sample_size . The number of features is determined by the number of address bits. For instance, 2 address bits will result in a 6 bit multiplexer and consequently 6 features (2 + 2^2 = 6). All features are binary (values in {0, 1}). y is a 1-dimensional array of class labels in {0, 1}. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/make_multiplexer_dataset","title":"make_multiplexer_dataset"},{"location":"api_subpackages/mlxtend.data/#mnist_data","text":"mnist_data() 5000 samples from the MNIST handwritten digits dataset. Data Source : http://yann.lecun.com/exdb/mnist/ Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 5000 image samples as rows, each row consists of 28x28 pixels that were unrolled into 784 pixel feature vectors. y contains the 10 unique class labels 0-9. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/mnist_data/","title":"mnist_data"},{"location":"api_subpackages/mlxtend.data/#three_blobs_data","text":"three_blobs_data() A random dataset of 3 2D blobs for clustering. 
Number of samples : 150 Suggested labels : {0, 1, 2}, distribution: [50, 50, 50] Returns X, y : [n_samples, n_features], [n_cluster_labels] X is the feature matrix with 159 samples as rows and 2 feature columns. y is a 1-dimensional array of the 3 suggested cluster labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/three_blobs_data","title":"three_blobs_data"},{"location":"api_subpackages/mlxtend.data/#wine_data","text":"wine_data() Wine dataset. Source : https://archive.ics.uci.edu/ml/datasets/Wine Number of samples : 178 Class labels : {0, 1, 2}, distribution: [59, 71, 48] Dataset Attributes: 1) Alcohol 2) Malic acid 3) Ash 4) Alcalinity of ash 5) Magnesium 6) Total phenols 7) Flavanoids 8) Nonflavanoid phenols 9) Proanthocyanins 10) Color intensity 11) Hue 12) OD280/OD315 of diluted wines 13) Proline Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 178 wine samples as rows and 13 feature columns. y is a 1-dimensional array of the 3 class labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/wine_data","title":"wine_data"},{"location":"api_subpackages/mlxtend.evaluate/","text":"mlxtend version: 0.14.0dev BootstrapOutOfBag BootstrapOutOfBag(n_splits=200, random_seed=None) Parameters n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. Returns train_idx : ndarray The training set indices for that split. test_idx : ndarray The testing set indices for that split. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/BootstrapOutOfBag/ Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility with scikit-learn. y : object Always ignored, exists for compatibility with scikit-learn. groups : object Always ignored, exists for compatibility with scikit-learn. Returns n_splits : int Returns the number of splitting iterations in the cross-validator. split(X, y=None, groups=None) y : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn. groups : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn. PredefinedHoldoutSplit PredefinedHoldoutSplit(valid_indices) Train/Validation set splitter for sklearn's GridSearchCV etc. Uses user-specified train/validation set indices to split a dataset into train/validation sets using user-defined or random indices. Parameters valid_indices : array-like, shape (num_examples,) Indices of the training examples in the training set to be used for validation. All other indices in the training set are used to for a training subset for model fitting. Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. 
Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split. RandomHoldoutSplit RandomHoldoutSplit(valid_size=0.5, random_seed=None, stratify=False) Train/Validation set splitter for sklearn's GridSearchCV etc. Provides train/validation set indices to split a dataset into train/validation sets using random indices. Parameters valid_size : float (default: 0.5) Proportion of examples that are assigned as validation examples. 1 - valid_size will then automatically be assigned as training set examples. random_seed : int (default: None) The random seed for splitting the data into training and validation set partitions. stratify : bool (default: False) True or False, whether to perform a stratified split or not Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of training examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split. bootstrap bootstrap(x, func, num_rounds=1000, ci=0.95, ddof=1, seed=None) Implements the ordinary nonparametric bootstrap Parameters x : NumPy array, shape=(n_samples, [n_columns]) A one- or multi-dimensional array of data records func : A function which computes a statistic that is used to compute the bootstrap replicates (the statistic computed from the bootstrap samples). This function must return a scalar value. For example, np.mean or np.median would be an acceptable argument for func if x is a 1-dimensional array or vector. num_rounds : int (default=1000) The number of bootstrap samples to draw where each bootstrap sample has the same number of records as the original dataset. ci : float (default=0.95) A float in the range (0, 1) that represents the confidence level for computing the confidence interval. For example, ci=0.95 (default) will compute the 95% confidence interval from the bootstrap replicates. ddof : int The delta degrees of freedom used when computing the standard error. seed : int or None (default=None) Random seed for generating bootstrap samples. Returns original, standard_error, (lower_ci, upper_ci) : tuple Returns the statistic of the original sample ( original ), the standard error of the estimate, and the respective confidence interval bounds.
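Because func accepts any scalar-returning statistic, the same API also covers, for instance, a median bootstrap; here is a minimal sketch (the synthetic sample below is an assumption for illustration):

```python
import numpy as np
from mlxtend.evaluate import bootstrap

rng = np.random.RandomState(42)
x = rng.normal(loc=5., size=100)  # synthetic sample for illustration

# Bootstrap the median instead of the mean; any scalar-returning
# statistic can be plugged in via `func`
original, std_err, ci_bounds = bootstrap(x, func=np.median,
                                         num_rounds=1000, ci=0.95, seed=42)
print(original, std_err, ci_bounds)
```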
Examples >>> from mlxtend.evaluate import bootstrap >>> rng = np.random.RandomState(123) >>> x = rng.normal(loc=5., size=100) >>> original, std_err, ci_bounds = bootstrap(x, ... num_rounds=1000, ... func=np.mean, ... ci=0.95, ... seed=123) >>> print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, ... std_err, ... ci_bounds[0], ... ci_bounds[1])) Mean: 5.03, SE: +/- 0.11, CI95: [4.80, 5.26] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap/ bootstrap_point632_score bootstrap_point632_score(estimator, X, y, n_splits=200, method='.632', scoring_func=None, random_seed=None, clone_estimator=True) Implementation of the .632 [1] and .632+ [2] bootstrap for supervised learning References: [1] Efron, Bradley. 1983. \"Estimating the Error Rate of a Prediction Rule: Improvement on Cross-Validation.\" Journal of the American Statistical Association 78 (382): 316. doi:10.2307/2288636. [2] Efron, Bradley, and Robert Tibshirani. 1997. \"Improvements on Cross-Validation: The .632+ Bootstrap Method.\" Journal of the American Statistical Association 92 (438): 548. doi:10.2307/2965703. Parameters estimator : object An estimator for classification or regression that follows the scikit-learn API and implements \"fit\" and \"predict\" methods. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. method : str (default='.632') The bootstrap method, which can be either - 1) '.632' bootstrap (default) - 2) '.632+' bootstrap - 3) 'oob' (regular out-of-bag, no weighting) for comparison studies. scoring_func : callable, Score function (or loss function) with signature scoring_func(y, y_pred, **kwargs) . If none, uses classification accuracy if the estimator is a classifier and mean squared error if the estimator is a regressor. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. clone_estimator : bool (default=True) Clones the estimator if true, otherwise fits the original. Returns scores : array of float, shape=(len(list(n_splits)),) Array of scores of the estimator for each bootstrap replicate. Examples >>> from sklearn import datasets, linear_model >>> from mlxtend.evaluate import bootstrap_point632_score >>> iris = datasets.load_iris() >>> X = iris.data >>> y = iris.target >>> lr = linear_model.LogisticRegression() >>> scores = bootstrap_point632_score(lr, X, y) >>> acc = np.mean(scores) >>> print('Accuracy:', acc) 0.953023146884 >>> lower = np.percentile(scores, 2.5) >>> upper = np.percentile(scores, 97.5) >>> print('95%% Confidence interval: [%.2f, %.2f]' % (lower, upper)) 95% Confidence interval: [0.90, 0.98] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap_point632_score/ cochrans_q cochrans_q(y_target, y_model_predictions)* Cochran's Q test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array. 
Returns q, p : float or None, float Returns the Q (chi-squared) value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/cochrans_q/ combined_ftest_5x2cv combined_ftest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv combined F test proposed by Alpaydin 1999, to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to be conform with sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns f : float The F-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/combined_ftest_5x2cv/ confusion_matrix confusion_matrix(y_target, y_predicted, binary=False, positive_label=1) Compute a confusion matrix/contingency table. Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: False) Maps a multi-class problem onto a binary confusion matrix, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns mat : array-like, shape=[n_classes, n_classes] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/confusion_matrix/ feature_importance_permutation feature_importance_permutation(X, y, predict_method, metric, num_rounds=1, seed=None) Feature importance imputation via permutation importance Parameters X : NumPy array, shape = [n_samples, n_features] Dataset, where n_samples is the number of samples and n_features is the number of features. y : NumPy array, shape = [n_samples] Target values. predict_method : prediction function A callable function that predicts the target values from X. metric : str, callable The metric for evaluating the feature importance through permutation. By default, the strings 'accuracy' is recommended for classifiers and the string 'r2' is recommended for regressors. Optionally, a custom scoring function (e.g., metric=scoring_func ) that accepts two arguments, y_true and y_pred, which have similar shape to the y array. num_rounds : int (default=1) Number of rounds the feature columns are permuted to compute the permutation importance. seed : int or None (default=None) Random seed for permuting the feature columns. Returns mean_importance_vals, all_importance_vals : NumPy arrays. 
The first array, mean_importance_vals, has shape [n_features, ] and contains the importance values for all features. The shape of the second array is [n_features, num_rounds] and contains the feature importance for each repetition. If num_rounds=1, it contains the same values as the first array, mean_importance_vals. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/feature_importance_permutation/ ftest ftest(y_target, *y_model_predictions) F test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array. Returns f, p : float or None, float Returns the F-value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/ftest/ lift_score lift_score(y_target, y_predicted, binary=True, positive_label=1) Lift measures the degree to which the predictions of a classification model are better than randomly-generated predictions. In terms of True Positives (TP), True Negatives (TN), False Positives (FP), and False Negatives (FN), the lift score is computed as: [ TP / (TP+FP) ] / [ (TP+FN) / (TP+TN+FP+FN) ] Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: True) Maps a multi-class problem onto a binary problem, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns score : float Lift score in the range [0, \\infty ] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/lift_score/ mcnemar mcnemar(ary, corrected=True, exact=False) McNemar test for paired nominal data Parameters ary : array-like, shape=[2, 2] 2 x 2 contingency table (as returned by evaluate.mcnemar_table), where a: ary[0, 0]: # of samples that both models predicted correctly b: ary[0, 1]: # of samples that model 1 got right and model 2 got wrong c: ary[1, 0]: # of samples that model 2 got right and model 1 got wrong d: ary[1, 1]: # of samples that both models predicted incorrectly corrected : bool (default: True) Uses Edwards' continuity correction for chi-squared if True exact : bool, (default: False) If True , uses an exact binomial test comparing b to a binomial distribution with n = b + c and p = 0.5. It is highly recommended to use exact=True for sample sizes < 25 since the test statistic is not well-approximated by the chi-squared distribution! Returns chi2, p : float or None, float Returns the chi-squared value and the p-value; if exact=True (default: False ), chi2 is None Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/ mcnemar_table mcnemar_table(y_target, y_model1, y_model2) Compute a 2x2 contingency table for McNemar's test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. y_model1 : array-like, shape=[n_samples] Predicted class labels from model 1 as 1D NumPy array. y_model2 : array-like, shape=[n_samples] Predicted class labels from model 2 as 1D NumPy array.
Returns tb : array-like, shape=[2, 2] 2x2 contingency table with the following contents: a: tb[0, 0]: # of samples that both models predicted correctly b: tb[0, 1]: # of samples that model 1 got right and model 2 got wrong c: tb[1, 0]: # of samples that model 2 got right and model 1 got wrong d: tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_table/ mcnemar_tables mcnemar_tables(y_target, *y_model_predictions) Compute multiple 2x2 contingency tables for McNemar's test or Cochran's Q test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-like, shape=[n_samples] Predicted class labels for a model. Returns tables : dict Dictionary of NumPy arrays with shape=[2, 2]. Each dictionary key names the two models to be compared based on the order the models were passed as *y_model_predictions . The number of dictionary entries is equal to the number of pairwise combinations between the m models, i.e., \"m choose 2.\" For example, the following target array (containing the true labels) and 3 models y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 1, 1, 1, 0, 1, 0, 0, 0, 0]) would result in the following dictionary: {'model_0 vs model_1': array([[ 4., 1.], [ 2., 3.]]), 'model_0 vs model_2': array([[ 3., 0.], [ 3., 4.]]), 'model_1 vs model_2': array([[ 3., 0.], [ 2., 5.]])} Each array is structured in the following way: tb[0, 0]: # of samples that both models predicted correctly tb[0, 1]: # of samples that model a got right and model b got wrong tb[1, 0]: # of samples that model b got right and model a got wrong tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/ paired_ttest_5x2cv paired_ttest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv paired t test proposed by Dietterich (1998) to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models.
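A minimal sketch of how this test might be invoked (the two estimators and the Iris toy dataset are assumptions for illustration only):

```python
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from mlxtend.evaluate import paired_ttest_5x2cv

X, y = load_iris(return_X_y=True)

# Compare two classifiers; a p-value below the chosen significance
# level suggests a real performance difference between the models
t, p = paired_ttest_5x2cv(estimator1=LogisticRegression(),
                          estimator2=DecisionTreeClassifier(),
                          X=X, y=y, random_seed=1)
print('t statistic: %.3f, p-value: %.3f' % (t, p))
```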
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_5x2cv/ paired_ttest_kfold_cv paired_ttest_kfold_cv(estimator1, estimator2, X, y, cv=10, scoring=None, shuffle=False, random_seed=None) Implements the k-fold paired t test procedure to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. cv : int (default: 10) Number of splits and iterations for the cross-validation procedure scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. shuffle : bool (default: False) Whether to shuffle the dataset for generating the k-fold splits. random_seed : int or None (default: None) Random seed for shuffling the dataset for generating the k-fold splits. Ignored if shuffle=False. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_kfold_cv/ paired_ttest_resampled paired_ttest_resampled(estimator1, estimator2, X, y, num_rounds=30, test_size=0.3, scoring=None, random_seed=None) Implements the resampled paired t test procedure to compare the performance of two models (also called k-hold-out paired t test). Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. num_rounds : int (default: 30) Number of resampling iterations (i.e., train/test splits) test_size : float or int (default: 0.3) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to use as a test set. If int, represents the absolute number of test examples. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits.
Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_resampled/ permutation_test permutation_test(x, y, func='x_mean != y_mean', method='exact', num_rounds=1000, seed=None) Nonparametric permutation test Parameters x : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the first sample (e.g., the treatment group). y : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the second sample (e.g., the control group). func : custom function or str (default: 'x_mean != y_mean') function to compute the statistic for the permutation test. - If 'x_mean != y_mean', uses func=lambda x, y: np.abs(np.mean(x) - np.mean(y)) for a two-sided test. - If 'x_mean > y_mean', uses func=lambda x, y: np.mean(x) - np.mean(y) for a one-sided test. - If 'x_mean < y_mean', uses func=lambda x, y: np.mean(y) - np.mean(x) for a one-sided test. method : 'approximate' or 'exact' (default: 'exact') If 'exact' (default), all possible permutations are considered. If 'approximate', the number of drawn samples is given by num_rounds . Note that 'exact' is typically not feasible unless the dataset size is relatively small. num_rounds : int (default: 1000) The number of permutation samples if method='approximate' . seed : int or None (default: None) The random seed for generating permutation samples if method='approximate' . Returns Returns the p-value under the null hypothesis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/permutation_test/
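A small sketch contrasting the exact and approximate modes (the two toy samples below are made up for illustration):

```python
from mlxtend.evaluate import permutation_test

treatment = [28.44, 29.32, 31.22, 29.58, 30.34, 28.76, 29.21]
control = [33.51, 30.63, 32.38, 32.52, 29.41, 30.93, 49.78]

# Exact test: enumerates all possible permutations
# (feasible here because the samples are small)
p_exact = permutation_test(treatment, control, method='exact')

# Approximate test: draws num_rounds random permutations instead
p_approx = permutation_test(treatment, control,
                            method='approximate', num_rounds=10000, seed=1)
print(p_exact, p_approx)
```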
proportion_difference proportion_difference(proportion_1, proportion_2, n_1, n_2=None) Computes the test statistic and p-value for a difference of proportions test. Parameters proportion_1 : float The first proportion proportion_2 : float The second proportion n_1 : int The sample size of the first test sample n_2 : int or None (default=None) The sample size of the second test sample. If None , n_2 = n_1 . Returns z, p : float or None, float Returns the z-score and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/proportion_difference/ scoring scoring(y_target, y_predicted, metric='error', positive_label=1, unique_labels='auto') Compute a scoring metric for supervised learning. Parameters y_target : array-like, shape=[n_values] True class labels or target values. y_predicted : array-like, shape=[n_values] Predicted class labels or target values. metric : str (default: 'error') Performance metric: 'accuracy': (TP + TN)/(FP + FN + TP + TN) = 1-ERR 'per-class accuracy': Average per-class accuracy 'per-class error': Average per-class error 'error': (FP + FN)/(FP + FN + TP + TN) = 1-ACC 'false_positive_rate': FP/N = FP/(FP + TN) 'true_positive_rate': TP/P = TP/(FN + TP) 'true_negative_rate': TN/N = TN/(FP + TN) 'precision': TP/(TP + FP) 'recall': equal to 'true_positive_rate' 'sensitivity': equal to 'true_positive_rate' or 'recall' 'specificity': equal to 'true_negative_rate' 'f1': 2 * (PRE * REC)/(PRE + REC) 'matthews_corr_coef': (TP*TN - FP*FN) / sqrt((TP + FP)(TP + FN)(TN + FP)(TN + FN)) Where: [TP: True positives, TN: True negatives, FP: False positives, FN: False negatives] positive_label : int (default: 1) Label of the positive class for binary classification metrics. unique_labels : str or array-like (default: 'auto') If 'auto', deduces the unique class labels from y_target Returns score : float Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/scoring/","title":"Mlxtend.evaluate"},{"location":"api_subpackages/mlxtend.evaluate/#bootstrapoutofbag","text":"BootstrapOutOfBag(n_splits=200, random_seed=None) Parameters n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. Returns train_idx : ndarray The training set indices for that split. test_idx : ndarray The testing set indices for that split. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/BootstrapOutOfBag/","title":"BootstrapOutOfBag"},{"location":"api_subpackages/mlxtend.evaluate/#methods","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility with scikit-learn. y : object Always ignored, exists for compatibility with scikit-learn. groups : object Always ignored, exists for compatibility with scikit-learn. Returns n_splits : int Returns the number of splitting iterations in the cross-validator. split(X, y=None, groups=None) y : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn. groups : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn.","title":"Methods"},{"location":"api_subpackages/mlxtend.evaluate/#predefinedholdoutsplit","text":"PredefinedHoldoutSplit(valid_indices) Train/Validation set splitter for sklearn's GridSearchCV etc. Uses user-specified train/validation set indices to split a dataset into train/validation sets using user-defined or random indices. Parameters valid_indices : array-like, shape (num_examples,) Indices of the training examples in the training set to be used for validation. All other indices in the training set are used for a training subset for model fitting.","title":"PredefinedHoldoutSplit"},{"location":"api_subpackages/mlxtend.evaluate/#methods_1","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator.
Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.","title":"Methods"},{"location":"api_subpackages/mlxtend.evaluate/#randomholdoutsplit","text":"RandomHoldoutSplit(valid_size=0.5, random_seed=None, stratify=False) Train/Validation set splitter for sklearn's GridSearchCV etc. Provides train/validation set indices to split a dataset into train/validation sets using random indices. Parameters valid_size : float (default: 0.5) Proportion of examples that being assigned as validation examples. 1- valid_size will then automatically be assigned as training set examples. random_seed : int (default: None) The random seed for splitting the data into training and validation set partitions. stratify : bool (default: False) True or False, whether to perform a stratified split or not","title":"RandomHoldoutSplit"},{"location":"api_subpackages/mlxtend.evaluate/#methods_2","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of training examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.","title":"Methods"},{"location":"api_subpackages/mlxtend.evaluate/#bootstrap","text":"bootstrap(x, func, num_rounds=1000, ci=0.95, ddof=1, seed=None) Implements the ordinary nonparametric bootstrap Parameters x : NumPy array, shape=(n_samples, [n_columns]) An one or multidimensional array of data records func : A function which computes a statistic that is used to compute the bootstrap replicates (the statistic computed from the bootstrap samples). This function must return a scalar value. For example, np.mean or np.median would be an acceptable argument for func if x is a 1-dimensional array or vector. num_rounds : int (default=1000) The number of bootstrap samnples to draw where each bootstrap sample has the same number of records as the original dataset. ci : int (default=0.95) An integer in the range (0, 1) that represents the confidence level for computing the confidence interval. For example, ci=0.95 (default) will compute the 95% confidence interval from the bootstrap replicates. ddof : int The delta degrees of freedom used when computing the standard error. 
seed : int or None (default=None) Random seed for generating bootstrap samples. Returns original, standard_error, (lower_ci, upper_ci) : tuple Returns the statistic of the original sample ( original ), the standard error of the estimate, and the respective confidence interval bounds. Examples >>> from mlxtend.evaluate import bootstrap >>> rng = np.random.RandomState(123) >>> x = rng.normal(loc=5., size=100) >>> original, std_err, ci_bounds = bootstrap(x, ... num_rounds=1000, ... func=np.mean, ... ci=0.95, ... seed=123) >>> print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, ... std_err, ... ci_bounds[0], ... ci_bounds[1])) Mean: 5.03, SE: +/- 0.11, CI95: [4.80, 5.26] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap/","title":"bootstrap"},{"location":"api_subpackages/mlxtend.evaluate/#bootstrap_point632_score","text":"bootstrap_point632_score(estimator, X, y, n_splits=200, method='.632', scoring_func=None, random_seed=None, clone_estimator=True) Implementation of the .632 [1] and .632+ [2] bootstrap for supervised learning References: [1] Efron, Bradley. 1983. \"Estimating the Error Rate of a Prediction Rule: Improvement on Cross-Validation.\" Journal of the American Statistical Association 78 (382): 316. doi:10.2307/2288636. [2] Efron, Bradley, and Robert Tibshirani. 1997. \"Improvements on Cross-Validation: The .632+ Bootstrap Method.\" Journal of the American Statistical Association 92 (438): 548. doi:10.2307/2965703. Parameters estimator : object An estimator for classification or regression that follows the scikit-learn API and implements \"fit\" and \"predict\" methods. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. method : str (default='.632') The bootstrap method, which can be either - 1) '.632' bootstrap (default) - 2) '.632+' bootstrap - 3) 'oob' (regular out-of-bag, no weighting) for comparison studies. scoring_func : callable, Score function (or loss function) with signature scoring_func(y, y_pred, **kwargs) . If none, uses classification accuracy if the estimator is a classifier and mean squared error if the estimator is a regressor. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. clone_estimator : bool (default=True) Clones the estimator if true, otherwise fits the original. Returns scores : array of float, shape=(len(list(n_splits)),) Array of scores of the estimator for each bootstrap replicate. Examples >>> from sklearn import datasets, linear_model >>> from mlxtend.evaluate import bootstrap_point632_score >>> iris = datasets.load_iris() >>> X = iris.data >>> y = iris.target >>> lr = linear_model.LogisticRegression() >>> scores = bootstrap_point632_score(lr, X, y) >>> acc = np.mean(scores) >>> print('Accuracy:', acc) 0.953023146884 >>> lower = np.percentile(scores, 2.5) >>> upper = np.percentile(scores, 97.5) >>> print('95%% Confidence interval: [%.2f, %.2f]' % (lower, upper)) 95% Confidence interval: [0.90, 0.98] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap_point632_score/","title":"bootstrap_point632_score"},{"location":"api_subpackages/mlxtend.evaluate/#cochrans_q","text":"cochrans_q(y_target, y_model_predictions)* Cochran's Q test to compare 2 or more models. 
Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array. Returns q, p : float or None, float Returns the Q (chi-squared) value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/cochrans_q/","title":"cochrans_q"},{"location":"api_subpackages/mlxtend.evaluate/#combined_ftest_5x2cv","text":"combined_ftest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv combined F test proposed by Alpaydin 1999, to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to be conform with sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns f : float The F-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/combined_ftest_5x2cv/","title":"combined_ftest_5x2cv"},{"location":"api_subpackages/mlxtend.evaluate/#confusion_matrix","text":"confusion_matrix(y_target, y_predicted, binary=False, positive_label=1) Compute a confusion matrix/contingency table. Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: False) Maps a multi-class problem onto a binary confusion matrix, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns mat : array-like, shape=[n_classes, n_classes] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/confusion_matrix/","title":"confusion_matrix"},{"location":"api_subpackages/mlxtend.evaluate/#feature_importance_permutation","text":"feature_importance_permutation(X, y, predict_method, metric, num_rounds=1, seed=None) Feature importance imputation via permutation importance Parameters X : NumPy array, shape = [n_samples, n_features] Dataset, where n_samples is the number of samples and n_features is the number of features. y : NumPy array, shape = [n_samples] Target values. predict_method : prediction function A callable function that predicts the target values from X. metric : str, callable The metric for evaluating the feature importance through permutation. 
By default, the strings 'accuracy' is recommended for classifiers and the string 'r2' is recommended for regressors. Optionally, a custom scoring function (e.g., metric=scoring_func ) that accepts two arguments, y_true and y_pred, which have similar shape to the y array. num_rounds : int (default=1) Number of rounds the feature columns are permuted to compute the permutation importance. seed : int or None (default=None) Random seed for permuting the feature columns. Returns mean_importance_vals, all_importance_vals : NumPy arrays. The first array, mean_importance_vals has shape [n_features, ] and contains the importance values for all features. The shape of the second array is [n_features, num_rounds] and contains the feature importance for each repetition. If num_rounds=1, it contains the same values as the first array, mean_importance_vals. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/feature_importance_permutation/","title":"feature_importance_permutation"},{"location":"api_subpackages/mlxtend.evaluate/#ftest","text":"ftest(y_target, y_model_predictions)* F-Test test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array. Returns f, p : float or None, float Returns the F-value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/ftest/","title":"ftest"},{"location":"api_subpackages/mlxtend.evaluate/#lift_score","text":"lift_score(y_target, y_predicted, binary=True, positive_label=1) Lift measures the degree to which the predictions of a classification model are better than randomly-generated predictions. The in terms of True Positives (TP), True Negatives (TN), False Positives (FP), and False Negatives (FN), the lift score is computed as: [ TP / (TP+FP) ] / [ (TP+FN) / (TP+TN+FP+FN) ] Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: True) Maps a multi-class problem onto a binary, where the positive class is 1 and all other classes are 0. positive_label : int (default: 0) Class label of the positive class. Returns score : float Lift score in the range [0, \\infty ] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/lift_score/","title":"lift_score"},{"location":"api_subpackages/mlxtend.evaluate/#mcnemar","text":"mcnemar(ary, corrected=True, exact=False) McNemar test for paired nominal data Parameters ary : array-like, shape=[2, 2] 2 x 2 contigency table (as returned by evaluate.mcnemar_table), where a: ary[0, 0]: # of samples that both models predicted correctly b: ary[0, 1]: # of samples that model 1 got right and model 2 got wrong c: ary[1, 0]: # of samples that model 2 got right and model 1 got wrong d: aryCell [1, 1]: # of samples that both models predicted incorrectly corrected : array-like, shape=[n_samples] (default: True) Uses Edward's continuity correction for chi-squared if True exact : bool, (default: False) If True , uses an exact binomial test comparing b to a binomial distribution with n = b + c and p = 0.5. It is highly recommended to use exact=True for sample sizes < 25 since chi-squared is not well-approximated by the chi-squared distribution! 
Returns chi2, p : float or None, float Returns the chi-squared value and the p-value; if exact=True (default: False ), chi2 is None Examples For usage examples, please see [http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/](http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/)","title":"mcnemar"},{"location":"api_subpackages/mlxtend.evaluate/#mcnemar_table","text":"mcnemar_table(y_target, y_model1, y_model2) Compute a 2x2 contigency table for McNemar's test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. y_model1 : array-like, shape=[n_samples] Predicted class labels from model as 1D NumPy array. y_model2 : array-like, shape=[n_samples] Predicted class labels from model 2 as 1D NumPy array. Returns tb : array-like, shape=[2, 2] 2x2 contingency table with the following contents: a: tb[0, 0]: # of samples that both models predicted correctly b: tb[0, 1]: # of samples that model 1 got right and model 2 got wrong c: tb[1, 0]: # of samples that model 2 got right and model 1 got wrong d: tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_table/","title":"mcnemar_table"},{"location":"api_subpackages/mlxtend.evaluate/#mcnemar_tables","text":"mcnemar_tables(y_target, y_model_predictions)* Compute multiple 2x2 contigency tables for McNemar's test or Cochran's Q test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. y_model_predictions : array-like, shape=[n_samples] Predicted class labels for a model. Returns tables : dict Dictionary of NumPy arrays with shape=[2, 2]. Each dictionary key names the two models to be compared based on the order the models were passed as *y_model_predictions . The number of dictionary entries is equal to the number of pairwise combinations between the m models, i.e., \"m choose 2.\" For example the following target array (containing the true labels) and 3 models y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod0 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) y_mod0 = np.array([0, 1, 1, 1, 0, 1, 0, 0, 0, 0]) would result in the following dictionary: {'model_0 vs model_1': array([[ 4., 1.], [ 2., 3.]]), 'model_0 vs model_2': array([[ 3., 0.], [ 3., 4.]]), 'model_1 vs model_2': array([[ 3., 0.], [ 2., 5.]])} Each array is structured in the following way: tb[0, 0]: # of samples that both models predicted correctly tb[0, 1]: # of samples that model a got right and model b got wrong tb[1, 0]: # of samples that model b got right and model a got wrong tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see [http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/](http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/)","title":"mcnemar_tables"},{"location":"api_subpackages/mlxtend.evaluate/#paired_ttest_5x2cv","text":"paired_ttest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv paired t test proposed by Dieterrich (1998) to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. 
scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to be conform with sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_5x2cv/","title":"paired_ttest_5x2cv"},{"location":"api_subpackages/mlxtend.evaluate/#paired_ttest_kfold_cv","text":"paired_ttest_kfold_cv(estimator1, estimator2, X, y, cv=10, scoring=None, shuffle=False, random_seed=None) Implements the k-fold paired t test procedure to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. cv : int (default: 10) Number of splits and iteration for the cross-validation procedure scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to be conform with sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. shuffle : bool (default: True) Whether to shuffle the dataset for generating the k-fold splits. random_seed : int or None (default: None) Random seed for shuffling the dataset for generating the k-fold splits. Ignored if shuffle=False. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_kfold_cv/","title":"paired_ttest_kfold_cv"},{"location":"api_subpackages/mlxtend.evaluate/#paired_ttest_resampled","text":"paired_ttest_resampled(estimator1, estimator2, X, y, num_rounds=30, test_size=0.3, scoring=None, random_seed=None) Implements the resampled paired t test procedure to compare the performance of two models (also called k-hold-out paired t test). 
Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. num_rounds : int (default: 30) Number of resampling iterations (i.e., train/test splits) test_size : float or int (default: 0.3) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to use as a test set. If int, represents the absolute number of test exsamples. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to be conform with sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_resampled/","title":"paired_ttest_resampled"},{"location":"api_subpackages/mlxtend.evaluate/#permutation_test","text":"permutation_test(x, y, func='x_mean != y_mean', method='exact', num_rounds=1000, seed=None) Nonparametric permutation test Parameters x : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the first sample (e.g., the treatment group). y : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the second sample (e.g., the control group). func : custom function or str (default: 'x_mean != y_mean') function to compute the statistic for the permutation test. - If 'x_mean != y_mean', uses func=lambda x, y: np.abs(np.mean(x) - np.mean(y))) for a two-sided test. - If 'x_mean > y_mean', uses func=lambda x, y: np.mean(x) - np.mean(y)) for a one-sided test. - If 'x_mean < y_mean', uses func=lambda x, y: np.mean(y) - np.mean(x)) for a one-sided test. method : 'approximate' or 'exact' (default: 'exact') If 'exact' (default), all possible permutations are considered. If 'approximate' the number of drawn samples is given by num_rounds . Note that 'exact' is typically not feasible unless the dataset size is relatively small. num_rounds : int (default: 1000) The number of permutation samples if method='approximate' . seed : int or None (default: None) The random seed for generating permutation samples if method='approximate' . Returns p-value under the null hypothesis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/permutation_test/","title":"permutation_test"},{"location":"api_subpackages/mlxtend.evaluate/#proportion_difference","text":"proportion_difference(proportion_1, proportion_2, n_1, n_2=None) Computes the test statistic and p-value for a difference of proportions test. 
Parameters proportion_1 : float The first proportion proportion_2 : float The second proportion n_1 : int The sample size of the first test sample n_2 : int or None (default=None) The sample size of the second test sample. If None , n_1 = n_2 . Returns z, p : float or None, float Returns the z-score and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/proportion_difference/","title":"proportion_difference"},{"location":"api_subpackages/mlxtend.evaluate/#scoring","text":"scoring(y_target, y_predicted, metric='error', positive_label=1, unique_labels='auto') Compute a scoring metric for supervised learning. Parameters y_target : array-like, shape=[n_values] True class labels or target values. y_predicted : array-like, shape=[n_values] Predicted class labels or target values. metric : str (default: 'error') Performance metric: 'accuracy': (TP + TN)/(FP + FN + TP + TN) = 1-ERR 'per-class accuracy': Average per-class accuracy 'per-class error': Average per-class error 'error': (TP + TN)/(FP+ FN + TP + TN) = 1-ACC 'false_positive_rate': FP/N = FP/(FP + TN) 'true_positive_rate': TP/P = TP/(FN + TP) 'true_negative_rate': TN/N = TN/(FP + TN) 'precision': TP/(TP + FP) 'recall': equal to 'true_positive_rate' 'sensitivity': equal to 'true_positive_rate' or 'recall' 'specificity': equal to 'true_negative_rate' 'f1': 2 * (PRE * REC)/(PRE + REC) 'matthews_corr_coef': (TP TN - FP FN) / (sqrt{(TP + FP)( TP + FN )( TN + FP )( TN + FN )}) Where: [TP: True positives, TN = True negatives, TN: True negatives, FN = False negatives] positive_label : int (default: 1) Label of the positive class for binary classification metrics. unique_labels : str or array-like (default: 'auto') If 'auto', deduces the unique class labels from y_target Returns score : float Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/scoring/","title":"scoring"},{"location":"api_subpackages/mlxtend.externals/","text":"mlxtend version: 0.14.0dev","title":"Mlxtend.externals"},{"location":"api_subpackages/mlxtend.feature_extraction/","text":"mlxtend version: 0.14.0dev LinearDiscriminantAnalysis LinearDiscriminantAnalysis(n_discriminants=None) Linear Discriminant Analysis Class Parameters n_discriminants : int (default: None) The number of discrimants for transformation. Keeps the original dimensions of the dataset if None . Attributes w_ : array-like, shape=[n_features, n_discriminants] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/LinearDiscriminantAnalysis/ Methods fit(X, y, n_classes=None) Fit the LDA model with X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' 
adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_discriminants] Projected training vectors. PrincipalComponentAnalysis PrincipalComponentAnalysis(n_components=None, solver='eigen') Principal Component Analysis Class Parameters n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . solver : str (default: 'eigen') Method for performing the matrix decomposition. {'eigen', 'svd'} Attributes w_ : array-like, shape=[n_features, n_components] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. loadings_ : array_like, shape=[n_features, n_features] The factor loadings of the original variables onto the principal components. The columns are the principal components, and the rows are the features loadings. For instance, the first column contains the loadings onto the first principal component. Note that the signs may be flipped depending on whether you use the 'eigen' or 'svd' solver; this does not affect the interpretation of the loadings though. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/PrincipalComponentAnalysis/ Methods fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors. 
RBFKernelPCA RBFKernelPCA(gamma=15.0, n_components=None, copy_X=True) RBF Kernel Principal Component Analysis for dimensionality reduction. Parameters gamma : float (default: 15.0) Free parameter (coefficient) of the RBF kernel. n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . copy_X : bool (default: True) Copies training data, which is required to compute the projection of new data via the transform method. Uses a reference to X if False. Attributes e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. X_projected_ : array-like, shape=[n_samples, n_components] Training samples projected along the component axes. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/RBFKernelPCA/ Methods fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause transform(X) Apply the non-linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"Mlxtend.feature extraction"},{"location":"api_subpackages/mlxtend.feature_extraction/#lineardiscriminantanalysis","text":"LinearDiscriminantAnalysis(n_discriminants=None) Linear Discriminant Analysis Class Parameters n_discriminants : int (default: None) The number of discrimants for transformation. Keeps the original dimensions of the dataset if None . Attributes w_ : array-like, shape=[n_features, n_discriminants] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/LinearDiscriminantAnalysis/","title":"LinearDiscriminantAnalysis"},{"location":"api_subpackages/mlxtend.feature_extraction/#methods","text":"fit(X, y, n_classes=None) Fit the LDA model with X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. 
n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_subpackages/mlxtend.feature_extraction/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.feature_extraction/#license-bsd-3-clause","text":"set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.feature_extraction/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.feature_extraction/#license-bsd-3-clause_1","text":"transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_discriminants] Projected training vectors.","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.feature_extraction/#principalcomponentanalysis","text":"PrincipalComponentAnalysis(n_components=None, solver='eigen') Principal Component Analysis Class Parameters n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . solver : str (default: 'eigen') Method for performing the matrix decomposition. {'eigen', 'svd'} Attributes w_ : array-like, shape=[n_features, n_components] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. loadings_ : array_like, shape=[n_features, n_features] The factor loadings of the original variables onto the principal components. The columns are the principal components, and the rows are the features loadings. For instance, the first column contains the loadings onto the first principal component. Note that the signs may be flipped depending on whether you use the 'eigen' or 'svd' solver; this does not affect the interpretation of the loadings though. 
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/PrincipalComponentAnalysis/","title":"PrincipalComponentAnalysis"},{"location":"api_subpackages/mlxtend.feature_extraction/#methods_1","text":"fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_subpackages/mlxtend.feature_extraction/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_2","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.feature_extraction/#license-bsd-3-clause_2","text":"set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.feature_extraction/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_3","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.feature_extraction/#license-bsd-3-clause_3","text":"transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.feature_extraction/#rbfkernelpca","text":"RBFKernelPCA(gamma=15.0, n_components=None, copy_X=True) RBF Kernel Principal Component Analysis for dimensionality reduction. Parameters gamma : float (default: 15.0) Free parameter (coefficient) of the RBF kernel. n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . copy_X : bool (default: True) Copies training data, which is required to compute the projection of new data via the transform method. Uses a reference to X if False. Attributes e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. X_projected_ : array-like, shape=[n_samples, n_components] Training samples projected along the component axes. 
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/RBFKernelPCA/","title":"RBFKernelPCA"},{"location":"api_subpackages/mlxtend.feature_extraction/#methods_2","text":"fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_subpackages/mlxtend.feature_extraction/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_4","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.feature_extraction/#license-bsd-3-clause_4","text":"set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.feature_extraction/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_5","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.feature_extraction/#license-bsd-3-clause_5","text":"transform(X) Apply the non-linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.feature_selection/","text":"mlxtend version: 0.14.0dev ColumnSelector ColumnSelector(cols=None, drop_axis=False) Object for selecting specific columns from a data set. Parameters cols : array-like (default: None) A list specifying the feature indices to be selected. For example, [1, 4, 5] to select the 2nd, 5th, and 6th feature columns. If None, returns all columns in the array. drop_axis : bool (default=False) Drops last axis if True and the only one column is selected. This is useful, e.g., when the ColumnSelector is used for selecting only one column and the resulting array should be fed to e.g., a scikit-learn column selector. E.g., instead of returning an array with shape (n_samples, 1), drop_axis=True will return an aray with shape (n_samples,). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ColumnSelector/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. 
y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features ExhaustiveFeatureSelector ExhaustiveFeatureSelector(estimator, min_features=1, max_features=1, print_progress=True, scoring='accuracy', cv=5, n_jobs=1, pre_dispatch='2 n_jobs', clone_estimator=True)* Exhaustive Feature Selection for Classification and Regression. (new in v0.4.3) Parameters estimator : scikit-learn classifier or regressor min_features : int (default: 1) Minumum number of features to select max_features : int (default: 1) Maximum number of features to select print_progress : bool (default: True) Prints progress as the number of epochs to stderr. scoring : str, (default='accuracy') Scoring metric in {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} for regressors, or a callable object or function with signature scorer(estimator, X, y) . cv : int (default: 5) Scikit-learn cross-validation generator or int . If estimator is a classifier (or y consists of integer class labels), stratified k-fold is performed, and regular k-fold cross-validation otherwise. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1. 
Attributes best_idx_ : array-like, shape = [n_predictions] Feature Indices of the selected feature subsets. best_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representation of the feature array indices. New in v 0.13.0. best_score_ : float Cross validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the exhaustive selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representation of the feature array indices. The 'feature_names' is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ExhaustiveFeatureSelector/ Methods fit(X, y, custom_feature_names=None, fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. custom_feature_names : None or tuple (default: tuple) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to to the fit method of classifier. Returns self : object fit_transform(X, y, fit_params) Fit to training data and return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. fit_params : dict of string -> object, optional Parameters to pass to to the fit method of classifier. Returns Feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': of CV average scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. 
Returns params : mapping of string to any Parameter names mapped to their values. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X) Return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Feature subset of X, shape={n_samples, k_features} SequentialFeatureSelector SequentialFeatureSelector(estimator, k_features=1, forward=True, floating=False, verbose=0, scoring=None, cv=5, n_jobs=1, pre_dispatch='2 n_jobs', clone_estimator=True)* Sequential Feature Selection for Classification and Regression. Parameters estimator : scikit-learn classifier or regressor k_features : int or tuple or str (default: 1) Number of features to select, where k_features < the full feature set. New in 0.4.2: A tuple containing a min and max value can be provided, and the SFS will consider return any feature combination between min and max that scored highest in cross-validtion. For example, the tuple (1, 4) will return any combination from 1 up to 4 features instead of a fixed number of features k. New in 0.8.0: A string argument \"best\" or \"parsimonious\". If \"best\" is provided, the feature selector will return the feature subset with the best cross-validation performance. If \"parsimonious\" is provided as an argument, the smallest feature subset that is within one standard error of the cross-validation performance will be selected. forward : bool (default: True) Forward selection if True, backward selection otherwise floating : bool (default: False) Adds a conditional exclusion/inclusion if True. verbose : int (default: 0), level of verbosity to use in logging. If 0, no output, if 1 number of features in current set, if 2 detailed logging i ncluding timestamp and cv scores at step. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to be conform with sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. cv : int (default: 5) Integer or iterable yielding train, test splits. If cv is an integer and estimator is a classifier (or y consists of integer class labels) stratified k-fold. Otherwise regular k-fold cross-validation is performed. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. 
This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1. Attributes k_feature_idx_ : array-like, shape = [n_predictions] Feature Indices of the selected feature subsets. k_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representation of the feature array indices. New in v 0.13.0. k_score_ : float Cross validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the sequential selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representation of the feature array indices. The 'feature_names' is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/SequentialFeatureSelector/ Methods fit(X, y, custom_feature_names=None, fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: pandas DataFrames are now also accepted as argument for y. custom_feature_names : None or tuple (default: tuple) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to to the fit method of classifier. Returns self : object fit_transform(X, y, fit_params) Fit to training data then reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: a pandas Series are now also accepted as argument for y. fit_params : dict of string -> object, optional Parameters to pass to to the fit method of classifier. 
Returns Reduced feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': of CV average scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X) Reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Reduced feature subset of X, shape={n_samples, k_features}","title":"Mlxtend.feature selection"},{"location":"api_subpackages/mlxtend.feature_selection/#columnselector","text":"ColumnSelector(cols=None, drop_axis=False) Object for selecting specific columns from a data set. Parameters cols : array-like (default: None) A list specifying the feature indices to be selected. For example, [1, 4, 5] to select the 2nd, 5th, and 6th feature columns. If None, returns all columns in the array. drop_axis : bool (default=False) Drops last axis if True and the only one column is selected. This is useful, e.g., when the ColumnSelector is used for selecting only one column and the resulting array should be fed to e.g., a scikit-learn column selector. E.g., instead of returning an array with shape (n_samples, 1), drop_axis=True will return an aray with shape (n_samples,). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ColumnSelector/","title":"ColumnSelector"},{"location":"api_subpackages/mlxtend.feature_selection/#methods","text":"fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features get_params(deep=True) Get parameters for this estimator. 
Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features","title":"Methods"},{"location":"api_subpackages/mlxtend.feature_selection/#exhaustivefeatureselector","text":"ExhaustiveFeatureSelector(estimator, min_features=1, max_features=1, print_progress=True, scoring='accuracy', cv=5, n_jobs=1, pre_dispatch='2 n_jobs', clone_estimator=True)* Exhaustive Feature Selection for Classification and Regression. (new in v0.4.3) Parameters estimator : scikit-learn classifier or regressor min_features : int (default: 1) Minumum number of features to select max_features : int (default: 1) Maximum number of features to select print_progress : bool (default: True) Prints progress as the number of epochs to stderr. scoring : str, (default='accuracy') Scoring metric in {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} for regressors, or a callable object or function with signature scorer(estimator, X, y) . cv : int (default: 5) Scikit-learn cross-validation generator or int . If estimator is a classifier (or y consists of integer class labels), stratified k-fold is performed, and regular k-fold cross-validation otherwise. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1. Attributes best_idx_ : array-like, shape = [n_predictions] Feature Indices of the selected feature subsets. best_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representation of the feature array indices. New in v 0.13.0. 
best_score_ : float Cross-validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the exhaustive selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. The 'feature_names' is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ExhaustiveFeatureSelector/","title":"ExhaustiveFeatureSelector"},{"location":"api_subpackages/mlxtend.feature_selection/#methods_1","text":"fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names']. (new in v 0.13.0) **fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data and return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. **fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns Feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': average of the CV scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.
Returns self transform(X) Return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Feature subset of X, shape={n_samples, k_features}","title":"Methods"},{"location":"api_subpackages/mlxtend.feature_selection/#sequentialfeatureselector","text":"SequentialFeatureSelector(estimator, k_features=1, forward=True, floating=False, verbose=0, scoring=None, cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Sequential Feature Selection for Classification and Regression. Parameters estimator : scikit-learn classifier or regressor k_features : int or tuple or str (default: 1) Number of features to select, where k_features < the full feature set. New in 0.4.2: A tuple containing a min and max value can be provided, and the SFS will return any feature combination between min and max that scored highest in cross-validation. For example, the tuple (1, 4) will return any combination from 1 up to 4 features instead of a fixed number of features k. New in 0.8.0: A string argument \"best\" or \"parsimonious\". If \"best\" is provided, the feature selector will return the feature subset with the best cross-validation performance. If \"parsimonious\" is provided as an argument, the smallest feature subset that is within one standard error of the cross-validation performance will be selected. forward : bool (default: True) Forward selection if True, backward selection otherwise floating : bool (default: False) Adds a conditional exclusion/inclusion if True. verbose : int (default: 0) Level of verbosity to use in logging. If 0, no output; if 1, the number of features in the current set; if 2, detailed logging including timestamp and CV scores at each step. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y); see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. cv : int (default: 5) Integer or iterable yielding train, test splits. If cv is an integer and estimator is a classifier (or y consists of integer class labels), stratified k-fold cross-validation is performed. Otherwise, regular k-fold cross-validation is performed. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned.
Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0 and n_jobs=1. Attributes k_feature_idx_ : array-like, shape = [n_predictions] Feature indices of the selected feature subsets. k_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. New in v 0.13.0. k_score_ : float Cross-validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the sequential selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. The 'feature_names' is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/SequentialFeatureSelector/","title":"SequentialFeatureSelector"},{"location":"api_subpackages/mlxtend.feature_selection/#methods_2","text":"fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: pandas DataFrames are now also accepted as argument for y. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names']. (new in v 0.13.0) **fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data, then reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: a pandas Series is now also accepted as argument for y. **fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier.
Returns Reduced feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': average of the CV scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Reduced feature subset of X, shape={n_samples, k_features}","title":"Methods"},{"location":"api_subpackages/mlxtend.file_io/","text":"mlxtend version: 0.14.0dev find_filegroups find_filegroups(paths, substring='', extensions=None, validity_check=True, ignore_invisible=True, rstrip='', ignore_substring=None) Find and collect files from different directories in a Python dictionary. Parameters paths : list Paths of the directories to be searched. Dictionary keys are built from the first directory. substring : str (default: '') Substring that all files have to contain to be considered. extensions : list (default: None) None or list of allowed file extensions for each path. If provided, the number of extensions must match the number of paths. validity_check : bool (default: True) If True, checks if all dictionary values have the same number of file paths. Prints a warning and returns an empty dictionary if the validity check fails. ignore_invisible : bool (default: True) If True, ignores invisible files (i.e., files starting with a period). rstrip : str (default: '') If provided, strips characters from the right side of the file base names after splitting the extension. Useful to trim different filenames to a common stem. E.g., \"abc_d.txt\" and \"abc_d_.csv\" would share the stem \"abc_d\" if rstrip is set to \"_\". ignore_substring : str (default: None) Ignores files that contain the specified substring. Returns groups : dict Dictionary of file paths. Keys are the file names found in the first directory listed in paths (without file extension). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_filegroups/ find_files find_files(substring, path, recursive=False, check_ext=None, ignore_invisible=True, ignore_substring=None) Find files in a directory based on substring matching.
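Since the feature selectors above all follow the same scikit-learn-style fit/transform API, a minimal usage sketch may help tie the pieces together. This is an illustrative example only (it assumes scikit-learn is installed alongside mlxtend and uses its bundled iris data); ExhaustiveFeatureSelector is used analogously:

```python
# Minimal SequentialFeatureSelector sketch (assumes scikit-learn).
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from mlxtend.feature_selection import SequentialFeatureSelector

X, y = load_iris(return_X_y=True)
knn = KNeighborsClassifier(n_neighbors=3)

# Forward selection up to 3 features, scored by 5-fold stratified CV accuracy.
sfs = SequentialFeatureSelector(knn, k_features=3, forward=True,
                                floating=False, scoring='accuracy', cv=5)
sfs = sfs.fit(X, y)

print(sfs.k_feature_idx_)        # indices of the selected features
print(sfs.k_score_)              # CV average score of the selected subset
X_selected = sfs.transform(X)    # reduce X to the selected columns
metrics = sfs.get_metric_dict()  # per-subset CV stats, incl. 'ci_bound'
```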
Parameters substring : str Substring of the file to be matched. path : str Path where to look. recursive : bool If True, searches subdirectories recursively. check_ext : str If string (e.g., '.txt'), only returns files that match the specified file extension. ignore_invisible : bool If True, ignores invisible files (i.e., files starting with a period). ignore_substring : str Ignores files that contain the specified substring. Returns results : list List of the matched files. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_files/","title":"Mlxtend.file io"},{"location":"api_subpackages/mlxtend.file_io/#find_filegroups","text":"find_filegroups(paths, substring='', extensions=None, validity_check=True, ignore_invisible=True, rstrip='', ignore_substring=None) Find and collect files from different directories in a Python dictionary. Parameters paths : list Paths of the directories to be searched. Dictionary keys are built from the first directory. substring : str (default: '') Substring that all files have to contain to be considered. extensions : list (default: None) None or list of allowed file extensions for each path. If provided, the number of extensions must match the number of paths. validity_check : bool (default: True) If True, checks if all dictionary values have the same number of file paths. Prints a warning and returns an empty dictionary if the validity check fails. ignore_invisible : bool (default: True) If True, ignores invisible files (i.e., files starting with a period). rstrip : str (default: '') If provided, strips characters from the right side of the file base names after splitting the extension. Useful to trim different filenames to a common stem. E.g., \"abc_d.txt\" and \"abc_d_.csv\" would share the stem \"abc_d\" if rstrip is set to \"_\". ignore_substring : str (default: None) Ignores files that contain the specified substring. Returns groups : dict Dictionary of file paths. Keys are the file names found in the first directory listed in paths (without file extension). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_filegroups/","title":"find_filegroups"},{"location":"api_subpackages/mlxtend.file_io/#find_files","text":"find_files(substring, path, recursive=False, check_ext=None, ignore_invisible=True, ignore_substring=None) Find files in a directory based on substring matching. Parameters substring : str Substring of the file to be matched. path : str Path where to look. recursive : bool If True, searches subdirectories recursively. check_ext : str If string (e.g., '.txt'), only returns files that match the specified file extension. ignore_invisible : bool If True, ignores invisible files (i.e., files starting with a period). ignore_substring : str Ignores files that contain the specified substring. Returns results : list List of the matched files. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_files/","title":"find_files"},{"location":"api_subpackages/mlxtend.frequent_patterns/","text":"mlxtend version: 0.14.0dev apriori apriori(df, min_support=0.5, use_colnames=False, max_len=None, n_jobs=1) Get frequent itemsets from a one-hot DataFrame Parameters df : pandas DataFrame or pandas SparseDataFrame pandas DataFrame in the encoded format. The allowed values are either 0/1 or True/False.
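To make the find_files signature above concrete, here is a hedged sketch; the path, substring, and extension are placeholders for illustration, not values from the original docs:

```python
# Minimal find_files sketch; '/tmp', 'run', and '.log' are placeholders.
from mlxtend.file_io import find_files

# Collect all '.log' files under /tmp whose names contain 'run',
# searching subdirectories recursively and skipping invisible files.
results = find_files(substring='run', path='/tmp', recursive=True,
                     check_ext='.log', ignore_invisible=True)
print(results)  # list of matched file paths
```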
For example,

   Apple  Bananas  Beer  Chicken  Milk  Rice
0      1        0     1        1     0     1
1      1        0     1        0     0     1
2      1        0     1        0     0     0
3      1        1     0        0     0     0
4      0        0     1        1     1     1
5      0        0     1        0     1     1
6      0        0     1        0     1     0
7      1        1     0        0     0     0

min_support : float (default: 0.5) A float between 0 and 1 for minimum support of the itemsets returned. The support is computed as the fraction transactions_where_item(s)_occur / total_transactions. use_colnames : bool (default: False) If True, uses the DataFrames' column names in the returned DataFrame instead of column indices. max_len : int (default: None) Maximum length of the itemsets generated. If None (default), all possible itemset lengths (under the apriori condition) are evaluated. Returns pandas DataFrame with columns ['support', 'itemsets'] of all itemsets with a support >= min_support and a length not exceeding max_len (if max_len is not None). Each itemset in the 'itemsets' column is of type frozenset, which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/apriori/ association_rules association_rules(df, metric='confidence', min_threshold=0.8, support_only=False) Generates a DataFrame of association rules including the metrics 'score', 'confidence', and 'lift' Parameters df : pandas DataFrame pandas DataFrame of frequent itemsets with columns ['support', 'itemsets'] metric : string (default: 'confidence') Metric to evaluate if a rule is of interest. Automatically set to 'support' if support_only=True. Otherwise, supported metrics are 'support', 'confidence', 'lift', 'leverage', and 'conviction'. These metrics are computed as follows: - support(A->C) = support(A+C) [aka 'support'], range: [0, 1] - confidence(A->C) = support(A+C) / support(A), range: [0, 1] - lift(A->C) = confidence(A->C) / support(C), range: [0, inf] - leverage(A->C) = support(A->C) - support(A)*support(C), range: [-1, 1] - conviction = [1 - support(C)] / [1 - confidence(A->C)], range: [0, inf] min_threshold : float (default: 0.8) Minimal threshold for the evaluation metric, via the metric parameter, to decide whether a candidate rule is of interest. support_only : bool (default: False) Only computes the rule support and fills the other metric columns with NaNs. This is useful if: a) the input DataFrame is incomplete, e.g., does not contain support values for all rule antecedents and consequents b) you simply want to speed up the computation because you don't need the other metrics. Returns pandas DataFrame with columns \"antecedents\" and \"consequents\" that store itemsets, plus the scoring metric columns: \"antecedent support\", \"consequent support\", \"support\", \"confidence\", \"lift\", \"leverage\", \"conviction\" of all rules for which metric(rule) >= min_threshold. Each entry in the \"antecedents\" and \"consequents\" columns is of type frozenset, which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset).
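Since apriori and association_rules are meant to be chained, a short sketch of that workflow may be useful. TransactionEncoder (documented under mlxtend.preprocessing further below) builds the one-hot DataFrame; the transaction lists here are illustrative only:

```python
# Sketch of the apriori -> association_rules workflow.
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules

transactions = [['Apple', 'Beer', 'Rice', 'Chicken'],
                ['Apple', 'Beer', 'Rice'],
                ['Apple', 'Beer'],
                ['Apple', 'Bananas'],
                ['Milk', 'Beer', 'Rice', 'Chicken'],
                ['Milk', 'Beer', 'Rice'],
                ['Milk', 'Beer'],
                ['Apple', 'Bananas']]

# One-hot encode the transactions into a boolean DataFrame.
te = TransactionEncoder()
df = pd.DataFrame(te.fit(transactions).transform(transactions),
                  columns=te.columns_)

frequent = apriori(df, min_support=0.5, use_colnames=True)
rules = association_rules(frequent, metric='confidence', min_threshold=0.8)
print(rules[['antecedents', 'consequents', 'support', 'confidence']])
```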
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/association_rules/","title":"Mlxtend.frequent patterns"},{"location":"api_subpackages/mlxtend.frequent_patterns/#apriori","text":"apriori(df, min_support=0.5, use_colnames=False, max_len=None, n_jobs=1) Get frequent itemsets from a one-hot DataFrame Parameters df : pandas DataFrame or pandas SparseDataFrame pandas DataFrame in the encoded format. The allowed values are either 0/1 or True/False. For example,

   Apple  Bananas  Beer  Chicken  Milk  Rice
0      1        0     1        1     0     1
1      1        0     1        0     0     1
2      1        0     1        0     0     0
3      1        1     0        0     0     0
4      0        0     1        1     1     1
5      0        0     1        0     1     1
6      0        0     1        0     1     0
7      1        1     0        0     0     0

min_support : float (default: 0.5) A float between 0 and 1 for minimum support of the itemsets returned. The support is computed as the fraction transactions_where_item(s)_occur / total_transactions. use_colnames : bool (default: False) If True, uses the DataFrames' column names in the returned DataFrame instead of column indices. max_len : int (default: None) Maximum length of the itemsets generated. If None (default), all possible itemset lengths (under the apriori condition) are evaluated. Returns pandas DataFrame with columns ['support', 'itemsets'] of all itemsets with a support >= min_support and a length not exceeding max_len (if max_len is not None). Each itemset in the 'itemsets' column is of type frozenset, which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/apriori/","title":"apriori"},{"location":"api_subpackages/mlxtend.frequent_patterns/#association_rules","text":"association_rules(df, metric='confidence', min_threshold=0.8, support_only=False) Generates a DataFrame of association rules including the metrics 'score', 'confidence', and 'lift' Parameters df : pandas DataFrame pandas DataFrame of frequent itemsets with columns ['support', 'itemsets'] metric : string (default: 'confidence') Metric to evaluate if a rule is of interest. Automatically set to 'support' if support_only=True. Otherwise, supported metrics are 'support', 'confidence', 'lift', 'leverage', and 'conviction'. These metrics are computed as follows: - support(A->C) = support(A+C) [aka 'support'], range: [0, 1] - confidence(A->C) = support(A+C) / support(A), range: [0, 1] - lift(A->C) = confidence(A->C) / support(C), range: [0, inf] - leverage(A->C) = support(A->C) - support(A)*support(C), range: [-1, 1] - conviction = [1 - support(C)] / [1 - confidence(A->C)], range: [0, inf] min_threshold : float (default: 0.8) Minimal threshold for the evaluation metric, via the metric parameter, to decide whether a candidate rule is of interest. support_only : bool (default: False) Only computes the rule support and fills the other metric columns with NaNs. This is useful if: a) the input DataFrame is incomplete, e.g., does not contain support values for all rule antecedents and consequents b) you simply want to speed up the computation because you don't need the other metrics. Returns pandas DataFrame with columns \"antecedents\" and \"consequents\" that store itemsets, plus the scoring metric columns: \"antecedent support\", \"consequent support\", \"support\", \"confidence\", \"lift\", \"leverage\", \"conviction\" of all rules for which metric(rule) >= min_threshold.
Each entry in the \"antecedents\" and \"consequents\" columns are of type frozenset , which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/association_rules/","title":"association_rules"},{"location":"api_subpackages/mlxtend.image/","text":"mlxtend version: 0.14.0dev extract_face_landmarks extract_face_landmarks(img, return_dtype= ) Function to extract face landmarks. Note that this function requires an installation of the Python version of the library \"dlib\": http://dlib.net Parameters img : array, shape = [h, w, ?] numpy array of a face image. Supported shapes are - 3D tensors with 1 or more color channels, for example, RGB: [h, w, 3] - 2D tensors without color channel, for example, Grayscale: [h, w] return_dtype: the return data-type of the array, default: np.int32. Returns landmarks : numpy.ndarray, shape = [68, 2] A numpy array, where each row contains a landmark/point x-y coordinates. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/sources/image/extract_face_landmarks.ipynb","title":"Mlxtend.image"},{"location":"api_subpackages/mlxtend.image/#extract_face_landmarks","text":"extract_face_landmarks(img, return_dtype= ) Function to extract face landmarks. Note that this function requires an installation of the Python version of the library \"dlib\": http://dlib.net Parameters img : array, shape = [h, w, ?] numpy array of a face image. Supported shapes are - 3D tensors with 1 or more color channels, for example, RGB: [h, w, 3] - 2D tensors without color channel, for example, Grayscale: [h, w] return_dtype: the return data-type of the array, default: np.int32. Returns landmarks : numpy.ndarray, shape = [68, 2] A numpy array, where each row contains a landmark/point x-y coordinates. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/sources/image/extract_face_landmarks.ipynb","title":"extract_face_landmarks"},{"location":"api_subpackages/mlxtend.math/","text":"mlxtend version: 0.14.0dev factorial factorial(n) None num_combinations num_combinations(n, k, with_replacement=False) Function to calculate the number of possible combinations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool (default: False) Allows repeated elements if True. Returns comb : int Number of possible combinations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_combinations/ num_permutations num_permutations(n, k, with_replacement=False) Function to calculate the number of possible permutations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool Allows repeated elements if True. Returns permut : int Number of possible permutations. 
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_permutations/ vectorspace_dimensionality vectorspace_dimensionality(ary) Computes the dimensionality of the hyper-volume spanned by a vector set Parameters ary : array-like, shape=[num_vectors, num_vectors] An orthogonal set of vectors (arranged as columns in a matrix) Returns dimensions : int An integer indicating the dimensionality of the hyper-volume spanned by the vector set vectorspace_orthonormalization vectorspace_orthonormalization(ary, eps=1e-13) Transforms a set of column vectors to an orthonormal basis. Given a set of orthogonal vectors, this function converts such column vectors, arranged in a matrix, into orthonormal basis vectors. Parameters ary : array-like, shape=[num_vectors, num_vectors] An orthogonal set of vectors (arranged as columns in a matrix) eps : float (default: 1e-13) A small tolerance value to determine whether the vector norm is zero or not. Returns arr : array-like, shape=[num_vectors, num_vectors] An orthonormal set of vectors (arranged as columns)","title":"Mlxtend.math"},{"location":"api_subpackages/mlxtend.math/#factorial","text":"factorial(n) None","title":"factorial"},{"location":"api_subpackages/mlxtend.math/#num_combinations","text":"num_combinations(n, k, with_replacement=False) Function to calculate the number of possible combinations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool (default: False) Allows repeated elements if True. Returns comb : int Number of possible combinations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_combinations/","title":"num_combinations"},{"location":"api_subpackages/mlxtend.math/#num_permutations","text":"num_permutations(n, k, with_replacement=False) Function to calculate the number of possible permutations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool Allows repeated elements if True. Returns permut : int Number of possible permutations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_permutations/","title":"num_permutations"},{"location":"api_subpackages/mlxtend.math/#vectorspace_dimensionality","text":"vectorspace_dimensionality(ary) Computes the dimensionality of the hyper-volume spanned by a vector set Parameters ary : array-like, shape=[num_vectors, num_vectors] An orthogonal set of vectors (arranged as columns in a matrix) Returns dimensions : int An integer indicating the dimensionality of the hyper-volume spanned by the vector set","title":"vectorspace_dimensionality"},{"location":"api_subpackages/mlxtend.math/#vectorspace_orthonormalization","text":"vectorspace_orthonormalization(ary, eps=1e-13) Transforms a set of column vectors to an orthonormal basis. Given a set of orthogonal vectors, this function converts such column vectors, arranged in a matrix, into orthonormal basis vectors. Parameters ary : array-like, shape=[num_vectors, num_vectors] An orthogonal set of vectors (arranged as columns in a matrix) eps : float (default: 1e-13) A small tolerance value to determine whether the vector norm is zero or not.
Returns arr : array-like, shape=[num_vectors, num_vectors] An orthonormal set of vectors (arranged as columns)","title":"vectorspace_orthonormalization"},{"location":"api_subpackages/mlxtend.plotting/","text":"mlxtend version: 0.14.0dev category_scatter category_scatter(x, y, label_col, data, markers='sxo^v', colors=('blue', 'green', 'red', 'purple', 'gray', 'cyan'), alpha=0.7, markersize=20.0, legend_loc='best') Scatter plot to plot categories in different colors/markerstyles. Parameters x : str or int DataFrame column name of the x-axis values or integer for the numpy ndarray column index. y : str DataFrame column name of the y-axis values or integer for the numpy ndarray column index. data : Pandas DataFrame object or NumPy ndarray. markers : str Markers that are cycled through the label category. colors : tuple Colors that are cycled through the label category. alpha : float (default: 0.7) Parameter to control the transparency. markersize : float (default: 20.0) Parameter to control the marker size. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/category_scatter/ checkerboard_plot checkerboard_plot(ary, cell_colors=('white', 'black'), font_colors=('black', 'white'), fmt='%.1f', figsize=None, row_labels=None, col_labels=None, fontsize=None) Plot a checkerboard table / heatmap via matplotlib. Parameters ary : array-like, shape = [n, m] A 2D NumPy array. cell_colors : tuple or list (default: ('white', 'black')) Tuple or list containing the two colors of the checkerboard pattern. font_colors : tuple or list (default: ('black', 'white')) Font colors corresponding to the cell colors. figsize : tuple (default: (2.5, 2.5)) Height and width of the figure fmt : str (default: '%.1f') Python string formatter for cell values. The default '%.1f' results in floats with 1 digit after the decimal point. Use '%d' to show numbers as integers. row_labels : list (default: None) List of the row labels. Uses the array row indices 0 to n by default. col_labels : list (default: None) List of the column labels. Uses the array column indices 0 to m by default. fontsize : int (default: None) Specifies the font size of the checkerboard table. Uses matplotlib's default if None. Returns fig : matplotlib Figure object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/checkerboard_plot/ ecdf ecdf(x, y_label='ECDF', x_label=None, ax=None, percentile=None, ecdf_color=None, ecdf_marker='o', percentile_color='black', percentile_linestyle='--') Plots an Empirical Cumulative Distribution Function Parameters x : array or list, shape=[n_samples,] Array-like object containing the feature values y_label : str (default='ECDF') Text label for the y-axis x_label : str (default=None) Text label for the x-axis ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes.
Creates one if ax=None percentile : float (default=None) Float between 0 and 1 for plotting a percentile threshold line ecdf_color : matplotlib color (default=None) Color for the ECDF plot; uses matplotlib defaults if None ecdf_marker : matplotlib marker (default='o') Marker style for the ECDF plot percentile_color : matplotlib color (default='black') Color for the percentile threshold if percentile is not None percentile_linestyle : matplotlib linestyle (default='--') Line style for the percentile threshold if percentile is not None Returns ax : matplotlib.axes.Axes object percentile_threshold : float Feature threshold at the percentile, or None if percentile=None percentile_count : int or None Number of samples that have a feature value less than or equal to the feature threshold at the percentile threshold, or None if percentile=None Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/ecdf/ enrichment_plot enrichment_plot(df, colors='bgrkcy', markers=' ', linestyles='-', alpha=0.5, lw=2, where='post', grid=True, count_label='Count', xlim='auto', ylim='auto', invert_axes=False, legend_loc='best', ax=None) Plot an enrichment plot Parameters df : pandas.DataFrame A pandas DataFrame where columns represent the different categories. colors: str (default: 'bgrkcy') The colors of the bars. markers : str (default: ' ') Matplotlib markerstyles, e.g., 'sov' for square, circle, and triangle markers. linestyles : str (default: '-') Matplotlib linestyles, e.g., '-,--' to cycle normal and dashed lines. Note that the different linestyles need to be separated by commas. alpha : float (default: 0.5) Transparency level from 0.0 to 1.0. lw : int or float (default: 2) Linewidth parameter. where : {'post', 'pre', 'mid'} (default: 'post') Starting location of the steps. grid : bool (default: True) Plots a grid if True. count_label : str (default: 'Count') Label for the \"Count\"-axis. xlim : 'auto' or array-like [min, max] (default: 'auto') Min and maximum position of the x-axis range. ylim : 'auto' or array-like [min, max] (default: 'auto') Min and maximum position of the y-axis range. invert_axes : bool (default: False) Plots count on the x-axis if True. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False ax : matplotlib axis, optional (default: None) Use this axis for plotting or make a new one otherwise Returns ax : matplotlib axis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/enrichment_plot/ plot_confusion_matrix plot_confusion_matrix(conf_mat, hide_spines=False, hide_ticks=False, figsize=None, cmap=None, colorbar=False, show_absolute=True, show_normed=False) Plot a confusion matrix via matplotlib. Parameters conf_mat : array-like, shape = [n_classes, n_classes] Confusion matrix from evaluate.confusion_matrix. hide_spines : bool (default: False) Hides axis spines if True. hide_ticks : bool (default: False) Hides axis ticks if True figsize : tuple (default: (2.5, 2.5)) Height and width of the figure cmap : matplotlib colormap (default: None) Uses matplotlib.pyplot.cm.Blues if None colorbar : bool (default: False) Shows a colorbar if True show_absolute : bool (default: True) Shows absolute confusion matrix coefficients if True.
The normed confusion matrix coefficients give the proportion of training examples per class that are assigned the correct label. At least one of show_absolute or show_normed must be True. Returns fig, ax : matplotlib.pyplot subplot objects Figure and axis elements of the subplot. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_confusion_matrix/ plot_decision_regions plot_decision_regions(X, y, clf, feature_index=None, filler_feature_values=None, filler_feature_ranges=None, ax=None, X_highlight=None, res=None, legend=1, hide_spines=True, markers='s^oxv<>', colors='#1f77b4,#ff7f0e,#3ca02c,#d62728,#9467bd,#8c564b,#e377c2,#7f7f7f,#bcbd22,#17becf', scatter_kwargs=None, contourf_kwargs=None, scatter_highlight_kwargs=None) Plot decision regions of a classifier. Please note that this function assumes that class labels are labeled consecutively, e.g., 0, 1, 2, 3, 4, and 5. If you have class labels with integer labels > 4, you may want to provide additional colors and/or markers as colors and markers arguments. See http://matplotlib.org/examples/color/named_colors.html for more information. Parameters X : array-like, shape = [n_samples, n_features] Feature Matrix. y : array-like, shape = [n_samples] True class labels. clf : Classifier object. Must have a .predict method. feature_index : array-like (default: (0,) for 1D, (0, 1) otherwise) Feature indices to use for plotting. The first index in feature_index will be on the x-axis, the second index will be on the y-axis. filler_feature_values : dict (default: None) Only needed if the number of features > 2. Dictionary of feature index-value pairs for the features not being plotted. filler_feature_ranges : dict (default: None) Only needed if the number of features > 2. Dictionary of feature index-value pairs for the features not being plotted. Will use the ranges provided to select training samples for plotting. ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None. X_highlight : array-like, shape = [n_samples, n_features] (default: None) An array with data points that are used to highlight samples in X. res : float or array-like, shape = (2,) (default: None) This parameter was used to define the grid width, but it has been deprecated in favor of determining the number of points given the figure DPI and size automatically for optimal results and computational efficiency. To increase the resolution, it is recommended to provide a dpi argument via matplotlib, e.g., plt.figure(dpi=600). hide_spines : bool (default: True) Hide axis spines if True. legend : int (default: 1) Integer to specify the legend location. No legend if legend is 0. markers : str (default: 's^oxv<>') Scatterplot markers. colors : str (default: 'red,blue,limegreen,gray,cyan') Comma separated list of colors. scatter_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function. contourf_kwargs : dict (default: None) Keyword arguments for underlying matplotlib contourf function. scatter_highlight_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function.
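As a minimal illustration of the plot_confusion_matrix call just documented, here is a hedged sketch with a hand-made 2x2 matrix (the counts are made up for illustration):

```python
# Minimal plot_confusion_matrix sketch with illustrative counts.
import numpy as np
import matplotlib.pyplot as plt
from mlxtend.plotting import plot_confusion_matrix

cm = np.array([[50, 3],
               [5, 42]])
fig, ax = plot_confusion_matrix(conf_mat=cm, show_absolute=True,
                                show_normed=True, colorbar=True)
plt.show()
```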
Returns ax : matplotlib.axes.Axes object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_decision_regions/ plot_learning_curves plot_learning_curves(X_train, y_train, X_test, y_test, clf, train_marker='o', test_marker='^', scoring='misclassification error', suppress_plot=False, print_model=True, style='fivethirtyeight', legend_loc='best') Plots learning curves of a classifier. Parameters X_train : array-like, shape = [n_samples, n_features] Feature matrix of the training dataset. y_train : array-like, shape = [n_samples] True class labels of the training dataset. X_test : array-like, shape = [n_samples, n_features] Feature matrix of the test dataset. y_test : array-like, shape = [n_samples] True class labels of the test dataset. clf : Classifier object. Must have a .fit and a .predict method. train_marker : str (default: 'o') Marker for the training set line plot. test_marker : str (default: '^') Marker for the test set line plot. scoring : str (default: 'misclassification error') If not 'misclassification error', accepts the following metrics (from scikit-learn): {'accuracy', 'average_precision', 'f1_micro', 'f1_macro', 'f1_weighted', 'f1_samples', 'log_loss', 'precision', 'recall', 'roc_auc', 'adjusted_rand_score', 'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} suppress_plot : bool (default: False) Suppress matplotlib plots if True. Recommended for testing purposes. print_model : bool (default: True) Print model parameters in plot title if True. style : str (default: 'fivethirtyeight') Matplotlib style. legend_loc : str (default: 'best') Where to place the plot legend: {'best', 'upper left', 'upper right', 'lower left', 'lower right'} Returns errors : (training_error, test_error): tuple of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_learning_curves/ plot_linear_regression plot_linear_regression(X, y, model=LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False), corr_func='pearsonr', scattercolor='blue', fit_style='k--', legend=True, xlim='auto') Plot a linear regression line fit. Parameters X : numpy array, shape = [n_samples,] Samples. y : numpy array, shape (n_samples,) Target values. model : object (default: sklearn.linear_model.LinearRegression) Estimator object for regression. Must implement a .fit() and .predict() method. corr_func : str or function (default: 'pearsonr') Uses pearsonr from scipy.stats if corr_func='pearsonr' to compute the regression slope. If not 'pearsonr', the corr_func parameter expects a function of the form func(<x-array>, <y-array>) as inputs, which is expected to return a tuple (<correlation_coefficient>, <p-value>). scattercolor : string (default: blue) Color of scatter plot points. fit_style : string (default: k--) Style for the line fit. legend : bool (default: True) Plots a legend with the correlation coefficient, fit coefficient, and intercept values. xlim : array-like (x_min, x_max) or 'auto' (default: 'auto') X-axis limits for the linear line fit. Returns regression_fit : tuple intercept, slope, corr_coeff (float, float, float) Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_linear_regression/ plot_sequential_feature_selection plot_sequential_feature_selection(metric_dict, kind='std_dev', color='blue', bcolor='steelblue', marker='o', alpha=0.2, ylabel='Performance', confidence_interval=0.95) Plot feature selection results.
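For plot_decision_regions described above, a hedged sketch on two iris features may help (it assumes scikit-learn is installed; the classifier choice is arbitrary):

```python
# Sketch of plot_decision_regions for a 2D problem (assumes scikit-learn).
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from mlxtend.plotting import plot_decision_regions

X, y = load_iris(return_X_y=True)
X = X[:, [0, 2]]                   # keep two features so regions are plottable
clf = LogisticRegression().fit(X, y)

ax = plot_decision_regions(X, y, clf=clf, legend=2)
ax.set_xlabel('sepal length')
ax.set_ylabel('petal length')
plt.show()
```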
Parameters metric_dict : mlxtend.SequentialFeatureSelector.get_metric_dict() object kind : str (default: \"std_dev\") The kind of error bar or confidence interval in {'std_dev', 'std_err', 'ci', None}. color : str (default: \"blue\") Color of the lineplot (accepts any matplotlib color name) bcolor : str (default: \"steelblue\") Color of the error bars / confidence intervals (accepts any matplotlib color name). marker : str (default: \"o\") Marker of the line plot (accepts any matplotlib marker name). alpha : float in [0, 1] (default: 0.2) Transparency of the error bars / confidence intervals. ylabel : str (default: \"Performance\") Y-axis label. confidence_interval : float (default: 0.95) Confidence level if kind='ci'. Returns fig : matplotlib.pyplot.figure() object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_sequential_feature_selection/ remove_borders remove_borders(axes, left=False, bottom=False, right=True, top=True) Remove chart junk from matplotlib plots. Parameters axes : iterable An iterable containing plt.gca() or plt.subplot() objects, e.g. [plt.gca()]. left : bool (default: False) Hide left axis spine if True. bottom : bool (default: False) Hide bottom axis spine if True. right : bool (default: True) Hide right axis spine if True. top : bool (default: True) Hide top axis spine if True. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/remove_chartjunk/ scatterplotmatrix scatterplotmatrix(X, fig_axes=None, names=None, figsize=(8, 8), alpha=1.0, **kwargs) Lower triangle of a scatterplot matrix Parameters X : array-like, shape={num_examples, num_features} Design matrix containing data instances (examples) with multiple exploratory variables (features). fig_axes : tuple (default: None) A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...) names : list (default: None) A list of string names, which should have the same number of elements as there are features (columns) in X. figsize : tuple (default: (8, 8)) Height and width of the subplot grid. Ignored if fig_axes is not None. alpha : float (default: 1.0) Transparency for both the scatter plots and the histograms along the diagonal. **kwargs : kwargs Keyword arguments for the scatterplots. Returns fig_axes : tuple A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...) stacked_barplot stacked_barplot(df, bar_width='auto', colors='bgrcky', labels='index', rotation=90, legend_loc='best') Function to plot stacked barplots Parameters df : pandas.DataFrame A pandas DataFrame where the index denotes the x-axis labels, and the columns contain the different measurements for each row. bar_width : 'auto' or float (default: 'auto') Parameter to set the widths of the bars. If 'auto', the width is automatically determined by the number of columns in the dataset. colors : str (default: 'bgrcky') The colors of the bars. labels : 'index' or iterable (default: 'index') If 'index', the DataFrame index will be used as x-tick labels. rotation : int (default: 90) Parameter to rotate the x-axis labels.
legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/stacked_barplot/","title":"Mlxtend.plotting"},{"location":"api_subpackages/mlxtend.plotting/#category_scatter","text":"category_scatter(x, y, label_col, data, markers='sxo^v', colors=('blue', 'green', 'red', 'purple', 'gray', 'cyan'), alpha=0.7, markersize=20.0, legend_loc='best') Scatter plot to plot categories in different colors/markerstyles. Parameters x : str or int DataFrame column name of the x-axis values or integer for the numpy ndarray column index. y : str DataFrame column name of the y-axis values or integer for the numpy ndarray column index. data : Pandas DataFrame object or NumPy ndarray. markers : str Markers that are cycled through the label category. colors : tuple Colors that are cycled through the label category. alpha : float (default: 0.7) Parameter to control the transparency. markersize : float (default: 20.0) Parameter to control the marker size. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/category_scatter/","title":"category_scatter"},{"location":"api_subpackages/mlxtend.plotting/#checkerboard_plot","text":"checkerboard_plot(ary, cell_colors=('white', 'black'), font_colors=('black', 'white'), fmt='%.1f', figsize=None, row_labels=None, col_labels=None, fontsize=None) Plot a checkerboard table / heatmap via matplotlib. Parameters ary : array-like, shape = [n, m] A 2D NumPy array. cell_colors : tuple or list (default: ('white', 'black')) Tuple or list containing the two colors of the checkerboard pattern. font_colors : tuple or list (default: ('black', 'white')) Font colors corresponding to the cell colors. figsize : tuple (default: (2.5, 2.5)) Height and width of the figure fmt : str (default: '%.1f') Python string formatter for cell values. The default '%.1f' results in floats with 1 digit after the decimal point. Use '%d' to show numbers as integers. row_labels : list (default: None) List of the row labels. Uses the array row indices 0 to n by default. col_labels : list (default: None) List of the column labels. Uses the array column indices 0 to m by default. fontsize : int (default: None) Specifies the font size of the checkerboard table. Uses matplotlib's default if None. Returns fig : matplotlib Figure object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/checkerboard_plot/","title":"checkerboard_plot"},{"location":"api_subpackages/mlxtend.plotting/#ecdf","text":"ecdf(x, y_label='ECDF', x_label=None, ax=None, percentile=None, ecdf_color=None, ecdf_marker='o', percentile_color='black', percentile_linestyle='--') Plots an Empirical Cumulative Distribution Function Parameters x : array or list, shape=[n_samples,] Array-like object containing the feature values y_label : str (default='ECDF') Text label for the y-axis x_label : str (default=None) Text label for the x-axis ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes.
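The stacked_barplot entry above, which closes out the plotting page, expects the DataFrame layout it describes (index as x-tick labels, one column per measurement); a hedged sketch with made-up numbers:

```python
# Minimal stacked_barplot sketch with an illustrative DataFrame.
import pandas as pd
import matplotlib.pyplot as plt
from mlxtend.plotting import stacked_barplot

df = pd.DataFrame([[1.0, 2.0, 3.0, 4.0],
                   [1.4, 2.1, 2.9, 5.1],
                   [1.9, 2.2, 3.5, 4.1]],
                  columns=['X1', 'X2', 'X3', 'X4'],
                  index=['Sample1', 'Sample2', 'Sample3'])

fig = stacked_barplot(df, rotation=45, legend_loc='best')
plt.show()
```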
Creates one if ax=None percentile : float (default=None) Float between 0 and 1 for plotting a percentile threshold line ecdf_color : matplotlib color (default=None) Color for the ECDF plot; uses matplotlib defaults if None ecdf_marker : matplotlib marker (default='o') Marker style for the ECDF plot percentile_color : matplotlib color (default='black') Color for the percentile threshold if percentile is not None percentile_linestyle : matplotlib linestyle (default='--') Line style for the percentile threshold if percentile is not None Returns ax : matplotlib.axes.Axes object percentile_threshold : float Feature threshold at the percentile, or None if percentile=None percentile_count : int or None Number of samples that have a feature value less than or equal to the feature threshold at the percentile threshold, or None if percentile=None Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/ecdf/","title":"ecdf"},{"location":"api_subpackages/mlxtend.plotting/#enrichment_plot","text":"enrichment_plot(df, colors='bgrkcy', markers=' ', linestyles='-', alpha=0.5, lw=2, where='post', grid=True, count_label='Count', xlim='auto', ylim='auto', invert_axes=False, legend_loc='best', ax=None) Plot an enrichment plot Parameters df : pandas.DataFrame A pandas DataFrame where columns represent the different categories. colors: str (default: 'bgrkcy') The colors of the bars. markers : str (default: ' ') Matplotlib markerstyles, e.g., 'sov' for square, circle, and triangle markers. linestyles : str (default: '-') Matplotlib linestyles, e.g., '-,--' to cycle normal and dashed lines. Note that the different linestyles need to be separated by commas. alpha : float (default: 0.5) Transparency level from 0.0 to 1.0. lw : int or float (default: 2) Linewidth parameter. where : {'post', 'pre', 'mid'} (default: 'post') Starting location of the steps. grid : bool (default: True) Plots a grid if True. count_label : str (default: 'Count') Label for the \"Count\"-axis. xlim : 'auto' or array-like [min, max] (default: 'auto') Min and maximum position of the x-axis range. ylim : 'auto' or array-like [min, max] (default: 'auto') Min and maximum position of the y-axis range. invert_axes : bool (default: False) Plots count on the x-axis if True. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False ax : matplotlib axis, optional (default: None) Use this axis for plotting or make a new one otherwise Returns ax : matplotlib axis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/enrichment_plot/","title":"enrichment_plot"},{"location":"api_subpackages/mlxtend.plotting/#plot_confusion_matrix","text":"plot_confusion_matrix(conf_mat, hide_spines=False, hide_ticks=False, figsize=None, cmap=None, colorbar=False, show_absolute=True, show_normed=False) Plot a confusion matrix via matplotlib. Parameters conf_mat : array-like, shape = [n_classes, n_classes] Confusion matrix from evaluate.confusion_matrix. hide_spines : bool (default: False) Hides axis spines if True. hide_ticks : bool (default: False) Hides axis ticks if True figsize : tuple (default: (2.5, 2.5)) Height and width of the figure cmap : matplotlib colormap (default: None) Uses matplotlib.pyplot.cm.Blues if None colorbar : bool (default: False) Shows a colorbar if True show_absolute : bool (default: True) Shows absolute confusion matrix coefficients if True.
At least one of show_absolute or show_normed must be True. show_normed : bool (default: False) Shows normed confusion matrix coefficients if True. The normed confusion matrix coefficients give the proportion of training examples per class that are assigned the correct label. At least one of show_absolute or show_normed must be True. Returns fig, ax : matplotlib.pyplot subplot objects Figure and axis elements of the subplot. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_confusion_matrix/","title":"plot_confusion_matrix"},{"location":"api_subpackages/mlxtend.plotting/#plot_decision_regions","text":"plot_decision_regions(X, y, clf, feature_index=None, filler_feature_values=None, filler_feature_ranges=None, ax=None, X_highlight=None, res=None, legend=1, hide_spines=True, markers='s^oxv<>', colors='#1f77b4,#ff7f0e,#3ca02c,#d62728,#9467bd,#8c564b,#e377c2,#7f7f7f,#bcbd22,#17becf', scatter_kwargs=None, contourf_kwargs=None, scatter_highlight_kwargs=None) Plot decision regions of a classifier. Please note that this function assumes that class labels are labeled consecutively, e.g., 0, 1, 2, 3, 4, and 5. If you have class labels with integer labels > 4, you may want to provide additional colors and/or markers as colors and markers arguments. See http://matplotlib.org/examples/color/named_colors.html for more information. Parameters X : array-like, shape = [n_samples, n_features] Feature Matrix. y : array-like, shape = [n_samples] True class labels. clf : Classifier object. Must have a .predict method. feature_index : array-like (default: (0,) for 1D, (0, 1) otherwise) Feature indices to use for plotting. The first index in feature_index will be on the x-axis, the second index will be on the y-axis. filler_feature_values : dict (default: None) Only needed if the number of features > 2. Dictionary of feature index-value pairs for the features not being plotted. filler_feature_ranges : dict (default: None) Only needed if the number of features > 2. Dictionary of feature index-value pairs for the features not being plotted. Will use the ranges provided to select training samples for plotting. ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None. X_highlight : array-like, shape = [n_samples, n_features] (default: None) An array with data points that are used to highlight samples in X. res : float or array-like, shape = (2,) (default: None) This parameter was used to define the grid width, but it has been deprecated in favor of determining the number of points given the figure DPI and size automatically for optimal results and computational efficiency. To increase the resolution, it is recommended to provide a dpi argument via matplotlib, e.g., plt.figure(dpi=600). hide_spines : bool (default: True) Hide axis spines if True. legend : int (default: 1) Integer to specify the legend location. No legend if legend is 0. markers : str (default: 's^oxv<>') Scatterplot markers. colors : str (default: 'red,blue,limegreen,gray,cyan') Comma separated list of colors. scatter_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function. contourf_kwargs : dict (default: None) Keyword arguments for underlying matplotlib contourf function. scatter_highlight_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function.
Returns ax : matplotlib.axes.Axes object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_decision_regions/","title":"plot_decision_regions"},{"location":"api_subpackages/mlxtend.plotting/#plot_learning_curves","text":"plot_learning_curves(X_train, y_train, X_test, y_test, clf, train_marker='o', test_marker='^', scoring='misclassification error', suppress_plot=False, print_model=True, style='fivethirtyeight', legend_loc='best') Plots learning curves of a classifier. Parameters X_train : array-like, shape = [n_samples, n_features] Feature matrix of the training dataset. y_train : array-like, shape = [n_samples] True class labels of the training dataset. X_test : array-like, shape = [n_samples, n_features] Feature matrix of the test dataset. y_test : array-like, shape = [n_samples] True class labels of the test dataset. clf : Classifier object. Must have a .fit and a .predict method. train_marker : str (default: 'o') Marker for the training set line plot. test_marker : str (default: '^') Marker for the test set line plot. scoring : str (default: 'misclassification error') If not 'misclassification error', accepts the following metrics (from scikit-learn): {'accuracy', 'average_precision', 'f1_micro', 'f1_macro', 'f1_weighted', 'f1_samples', 'log_loss', 'precision', 'recall', 'roc_auc', 'adjusted_rand_score', 'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} suppress_plot : bool (default: False) Suppress matplotlib plots if True. Recommended for testing purposes. print_model : bool (default: True) Print model parameters in plot title if True. style : str (default: 'fivethirtyeight') Matplotlib style. legend_loc : str (default: 'best') Where to place the plot legend: {'best', 'upper left', 'upper right', 'lower left', 'lower right'} Returns errors : (training_error, test_error): tuple of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_learning_curves/","title":"plot_learning_curves"},{"location":"api_subpackages/mlxtend.plotting/#plot_linear_regression","text":"plot_linear_regression(X, y, model=LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False), corr_func='pearsonr', scattercolor='blue', fit_style='k--', legend=True, xlim='auto') Plot a linear regression line fit. Parameters X : numpy array, shape = [n_samples,] Samples. y : numpy array, shape (n_samples,) Target values. model : object (default: sklearn.linear_model.LinearRegression) Estimator object for regression. Must implement a .fit() and .predict() method. corr_func : str or function (default: 'pearsonr') Uses pearsonr from scipy.stats if corr_func='pearsonr' to compute the regression slope. If not 'pearsonr', the corr_func parameter expects a function of the form func(<x-array>, <y-array>) as inputs, which is expected to return a tuple (<correlation_coefficient>, <p-value>). scattercolor : string (default: blue) Color of scatter plot points. fit_style : string (default: k--) Style for the line fit. legend : bool (default: True) Plots a legend with the correlation coefficient, fit coefficient, and intercept values. xlim : array-like (x_min, x_max) or 'auto' (default: 'auto') X-axis limits for the linear line fit.
Returns regression_fit : tuple intercept, slope, corr_coeff (float, float, float) Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_linear_regression/","title":"plot_linear_regression"},{"location":"api_subpackages/mlxtend.plotting/#plot_sequential_feature_selection","text":"plot_sequential_feature_selection(metric_dict, kind='std_dev', color='blue', bcolor='steelblue', marker='o', alpha=0.2, ylabel='Performance', confidence_interval=0.95) Plot feature selection results. Parameters metric_dict : mlxtend.SequentialFeatureSelector.get_metric_dict() object kind : str (default: \"std_dev\") The kind of error bar or confidence interval in {'std_dev', 'std_err', 'ci', None}. color : str (default: \"blue\") Color of the lineplot (accepts any matplotlib color name). bcolor : str (default: \"steelblue\") Color of the error bars / confidence intervals (accepts any matplotlib color name). marker : str (default: \"o\") Marker of the line plot (accepts any matplotlib marker name). alpha : float in [0, 1] (default: 0.2) Transparency of the error bars / confidence intervals. ylabel : str (default: \"Performance\") Y-axis label. confidence_interval : float (default: 0.95) Confidence level if kind='ci'. Returns fig : matplotlib.pyplot.figure() object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_sequential_feature_selection/","title":"plot_sequential_feature_selection"},{"location":"api_subpackages/mlxtend.plotting/#remove_borders","text":"remove_borders(axes, left=False, bottom=False, right=True, top=True) Remove chart junk from matplotlib plots. Parameters axes : iterable An iterable containing plt.gca() or plt.subplot() objects, e.g., [plt.gca()]. left : bool (default: False) Hide left axis spine if True. bottom : bool (default: False) Hide bottom axis spine if True. right : bool (default: True) Hide right axis spine if True. top : bool (default: True) Hide top axis spine if True. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/remove_chartjunk/","title":"remove_borders"},{"location":"api_subpackages/mlxtend.plotting/#scatterplotmatrix","text":"scatterplotmatrix(X, fig_axes=None, names=None, figsize=(8, 8), alpha=1.0, **kwargs) Lower triangle of a scatterplot matrix. Parameters X : array-like, shape={num_examples, num_features} Design matrix containing data instances (examples) with multiple exploratory variables (features). fig_axes : tuple (default: None) A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...) names : list (default: None) A list of string names, which should have the same number of elements as there are features (columns) in X. figsize : tuple (default: (8, 8)) Height and width of the subplot grid. Ignored if fig_axes is not None. alpha : float (default: 1.0) Transparency for both the scatter plots and the histograms along the diagonal. **kwargs : kwargs Keyword arguments for the scatterplots.
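A short usage sketch (assuming the four-feature Iris data from mlxtend.data; the feature names are supplied manually for the axis labels), returning the (fig, axes) tuple described under Returns next:
>>> import matplotlib.pyplot as plt
>>> from mlxtend.data import iris_data
>>> from mlxtend.plotting import scatterplotmatrix
>>> X, y = iris_data()
>>> fig, axes = scatterplotmatrix(X, figsize=(10, 8), alpha=0.5,
...                               names=['sepal length', 'sepal width',
...                                      'petal length', 'petal width'])
>>> plt.tight_layout()
>>> plt.show()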
Returns fig_axes : tuple A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...)","title":"scatterplotmatrix"},{"location":"api_subpackages/mlxtend.plotting/#stacked_barplot","text":"stacked_barplot(df, bar_width='auto', colors='bgrcky', labels='index', rotation=90, legend_loc='best') Function to plot stacked barplots. Parameters df : pandas.DataFrame A pandas DataFrame where the index denotes the x-axis labels, and the columns contain the different measurements for each row. bar_width: 'auto' or float (default: 'auto') Parameter to set the widths of the bars. If 'auto', the width is automatically determined by the number of columns in the dataset. colors: str (default: 'bgrcky') The colors of the bars. labels: 'index' or iterable (default: 'index') If 'index', the DataFrame index will be used as x-tick labels. rotation: int (default: 90) Parameter to rotate the x-axis labels. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/stacked_barplot/","title":"stacked_barplot"},{"location":"api_subpackages/mlxtend.preprocessing/","text":"mlxtend version: 0.14.0dev CopyTransformer CopyTransformer() Transformer that returns a copy of the input array For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/CopyTransformer/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array. DenseTransformer DenseTransformer(return_copy=True) Convert a sparse array into a dense array. For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/DenseTransformer/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features.
y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array. MeanCenterer MeanCenterer() Column centering of vectors and matrices. Attributes col_means : numpy.ndarray [n_columns] NumPy array storing the mean values for centering after fitting the MeanCenterer object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/MeanCenterer/ Methods fit(X) Gets the column means for mean centering. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns self fit_transform(X) Fits and transforms an array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered. transform(X) Centers a NumPy array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered. OnehotTransactions OnehotTransactions(*args, **kwargs) Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/ Methods fit(X) Learn unique column names from the transaction data. Parameters X : list of lists A Python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator.
Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions. Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. 
Exact representation depends on the sparse argument For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] TransactionEncoder TransactionEncoder() Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/ Methods fit(X) Learn unique column names from transaction DataFrame Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions. Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. 
For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. Exact representation depends on the sparse argument For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] minmax_scaling minmax_scaling(array, columns, min_val=0, max_val=1) Min max scaling of pandas' DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] min_val : int or float , optional (default= 0 ) minimum value after rescaling. max_val : int or float , optional (default= 1 ) maximum value after rescaling. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with rescaled columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/minmax_scaling/ one_hot one_hot(y, num_labels='auto', dtype='float') One-hot encoding of class labels Parameters y : array-like, shape = [n_classlabels] Python list or numpy array consisting of class labels. num_labels : int or 'auto' Number of unique labels in the class label array. Infers the number of unique labels from the input array if set to 'auto'. dtype : str NumPy array type (float, float32, float64) of the output array. Returns ary : numpy.ndarray, shape = [n_classlabels] One-hot encoded array, where each sample is represented as a row vector in the returned array. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/one_hot/ shuffle_arrays_unison shuffle_arrays_unison(arrays, random_seed=None) Shuffle NumPy arrays in unison. Parameters arrays : array-like, shape = [n_arrays] A list of NumPy arrays. random_seed : int (default: None) Sets the random state. Returns shuffled_arrays : A list of NumPy arrays after shuffling. Examples >>> import numpy as np >>> from mlxtend.preprocessing import shuffle_arrays_unison >>> X1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> y1 = np.array([1, 2, 3]) >>> X2, y2 = shuffle_arrays_unison(arrays=[X1, y1], random_seed=3) >>> assert(X2.all() == np.array([[4, 5, 6], [1, 2, 3], [7, 8, 9]]).all()) >>> assert(y2.all() == np.array([2, 1, 3]).all()) >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/shuffle_arrays_unison/ standardize standardize(array, columns=None, ddof=0, return_params=False, params=None) Standardize columns in pandas DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. 
columns : array-like, shape = [n_columns] (default: None) Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] If None, standardizes all columns. ddof : int (default: 0) Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. return_params : dict (default: False) If set to True, a dictionary is returned in addition to the standardized array. The parameter dictionary contains the column means ('avgs') and standard deviations ('stds') of the individual columns. params : dict (default: None) A dictionary with column means and standard deviations as returned by the standardize function if return_params was set to True. If a params dictionary is provided, the standardize function will use these instead of computing them from the current array. Notes If all values in a given column are the same, these values are all set to 0.0 . The standard deviation in the parameters dictionary is consequently set to 1.0 to avoid dividing by zero. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with standardized columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/standardize/","title":"Mlxtend.preprocessing"},{"location":"api_subpackages/mlxtend.preprocessing/#copytransformer","text":"CopyTransformer() Transformer that returns a copy of the input array For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/CopyTransformer/","title":"CopyTransformer"},{"location":"api_subpackages/mlxtend.preprocessing/#methods","text":"fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array.","title":"Methods"},{"location":"api_subpackages/mlxtend.preprocessing/#densetransformer","text":"DenseTransformer(return_copy=True) Convert a sparse array into a dense array. For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/DenseTransformer/","title":"DenseTransformer"},{"location":"api_subpackages/mlxtend.preprocessing/#methods_1","text":"fit(X, y=None) Mock method. Does nothing. 
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array.","title":"Methods"},{"location":"api_subpackages/mlxtend.preprocessing/#meancenterer","text":"MeanCenterer() Column centering of vectors and matrices. Attributes col_means : numpy.ndarray [n_columns] NumPy array storing the mean values for centering after fitting the MeanCenterer object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/MeanCenterer/","title":"MeanCenterer"},{"location":"api_subpackages/mlxtend.preprocessing/#methods_2","text":"fit(X) Gets the column means for mean centering. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns self fit_transform(X) Fits and transforms an arry. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered. transform(X) Centers a NumPy array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. 
Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered.","title":"Methods"},{"location":"api_subpackages/mlxtend.preprocessing/#onehottransactions","text":"OnehotTransactions( args, * kwargs) Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/","title":"OnehotTransactions"},{"location":"api_subpackages/mlxtend.preprocessing/#methods_3","text":"fit(X) Learn unique column names from transaction DataFrame Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions. Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). 
Compressed Sparse Row matrix otherwise The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. Exact representation depends on the sparse argument For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice']","title":"Methods"},{"location":"api_subpackages/mlxtend.preprocessing/#transactionencoder","text":"TransactionEncoder() Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/","title":"TransactionEncoder"},{"location":"api_subpackages/mlxtend.preprocessing/#methods_4","text":"fit(X) Learn unique column names from transaction DataFrame Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions. Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. 
Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return Compressed Sparse Row matrix instead of the regular one. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. Exact representation depends on the sparse argument For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice']","title":"Methods"},{"location":"api_subpackages/mlxtend.preprocessing/#minmax_scaling","text":"minmax_scaling(array, columns, min_val=0, max_val=1) Min max scaling of pandas' DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] min_val : int or float , optional (default= 0 ) minimum value after rescaling. max_val : int or float , optional (default= 1 ) maximum value after rescaling. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with rescaled columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/minmax_scaling/","title":"minmax_scaling"},{"location":"api_subpackages/mlxtend.preprocessing/#one_hot","text":"one_hot(y, num_labels='auto', dtype='float') One-hot encoding of class labels Parameters y : array-like, shape = [n_classlabels] Python list or numpy array consisting of class labels. num_labels : int or 'auto' Number of unique labels in the class label array. Infers the number of unique labels from the input array if set to 'auto'. dtype : str NumPy array type (float, float32, float64) of the output array. Returns ary : numpy.ndarray, shape = [n_classlabels] One-hot encoded array, where each sample is represented as a row vector in the returned array. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/one_hot/","title":"one_hot"},{"location":"api_subpackages/mlxtend.preprocessing/#shuffle_arrays_unison","text":"shuffle_arrays_unison(arrays, random_seed=None) Shuffle NumPy arrays in unison. Parameters arrays : array-like, shape = [n_arrays] A list of NumPy arrays. random_seed : int (default: None) Sets the random state. Returns shuffled_arrays : A list of NumPy arrays after shuffling. 
Examples >>> import numpy as np >>> from mlxtend.preprocessing import shuffle_arrays_unison >>> X1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> y1 = np.array([1, 2, 3]) >>> X2, y2 = shuffle_arrays_unison(arrays=[X1, y1], random_seed=3) >>> assert(X2.all() == np.array([[4, 5, 6], [1, 2, 3], [7, 8, 9]]).all()) >>> assert(y2.all() == np.array([2, 1, 3]).all()) >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/shuffle_arrays_unison/","title":"shuffle_arrays_unison"},{"location":"api_subpackages/mlxtend.preprocessing/#standardize","text":"standardize(array, columns=None, ddof=0, return_params=False, params=None) Standardize columns in pandas DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] (default: None) Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] If None, standardizes all columns. ddof : int (default: 0) Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. return_params : dict (default: False) If set to True, a dictionary is returned in addition to the standardized array. The parameter dictionary contains the column means ('avgs') and standard deviations ('stds') of the individual columns. params : dict (default: None) A dictionary with column means and standard deviations as returned by the standardize function if return_params was set to True. If a params dictionary is provided, the standardize function will use these instead of computing them from the current array. Notes If all values in a given column are the same, these values are all set to 0.0 . The standard deviation in the parameters dictionary is consequently set to 1.0 to avoid dividing by zero. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with standardized columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/standardize/","title":"standardize"},{"location":"api_subpackages/mlxtend.regressor/","text":"mlxtend version: 0.14.0dev LinearRegression LinearRegression(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) Ordinary least squares linear regression. Parameters eta : float (default: 0.01) solver rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent learning If 1 < minibatches < len(y): Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch; ignored if solver='normal equation' Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/LinearRegression/ Methods fit(X, y, init_params=True) Learn model from training data. 
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause StackingCVRegressor StackingCVRegressor(regressors, meta_regressor, cv=5, shuffle=True, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A 'Stacking Cross-Validation' regressor for scikit-learn estimators. New in mlxtend v0.7.0 Notes The StackingCVRegressor uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus, NumPy's random seed needs to be specified explicitly for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVRegressor. Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingCVRegressor will fit clones of these original regressors that will be stored in the class attribute self.regr_. meta_regressor : object The meta-regressor to be fitted on the ensemble of regressors. cv : int, cross-validation generator or iterable, optional (default: 5) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - integer, to specify the number of folds in a KFold, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use KFold cross-validation. use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. shuffle : bool (default: True) If True, and the cv argument is an integer, the training data will be shuffled at the fitting stage prior to cross-validation. If the cv argument is a specific cross-validation technique, this argument is omitted.
store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor are stored in the self.train_meta_features_ array, which can be accessed after calling fit. refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Attributes train_meta_features : numpy array, shape = [n_samples, n_regressors] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingCVRegressor/ Methods fit(X, y, groups=None, sample_weight=None) Fit ensemble regressors and the meta-regressor. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold(). sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) ** 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) ** 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse).
A constant model that always predicts the expected value of y, disregarding the input features, would get an R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples. For some estimators this may be a precomputed kernel matrix instead, shape = (n_samples, n_samples_fitted), where n_samples_fitted is the number of samples used in the fitting for the estimator. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) w.r.t. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self StackingRegressor StackingRegressor(regressors, meta_regressor, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A Stacking regressor for scikit-learn estimators for regression. Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingRegressor will fit clones of those original regressors that will be stored in the class attribute self.regr_. meta_regressor : object The meta-regressor to be fitted on the ensemble of regressors. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the regressor being fitted - verbose=2 : Prints info about the parameters of the regressor being fitted - verbose>2 : Changes verbose param of the underlying regressor to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor are stored in the self.train_meta_features_ array, which can be accessed after calling fit. Attributes regr_ : list, shape=[n_regressors] Fitted regressors (clones of the original regressors) meta_regr_ : estimator Fitted meta-regressor (clone of the original meta-estimator) coef_ : array-like, shape = [n_features] Model coefficients of the fitted meta-estimator intercept_ : float Intercept of the fitted meta-estimator train_meta_features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingRegressor/ Methods fit(X, y, sample_weight=None) Learn weight coefficients from training data for each regressor.
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples. For some estimators this may be a precomputed kernel matrix instead, shape = (n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for the estimator. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) wrt. y. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self Properties coef_ None intercept_ None","title":"Mlxtend.regressor"},{"location":"api_subpackages/mlxtend.regressor/#linearregression","text":"LinearRegression(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) Ordinary least squares linear regression. Parameters eta : float (default: 0.01) solver rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. 
minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent learning If 1 < minibatches < len(y): Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch; ignored if solver='normal equation' Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/LinearRegression/","title":"LinearRegression"},{"location":"api_subpackages/mlxtend.regressor/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.' adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"api_subpackages/mlxtend.regressor/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.regressor/#license-bsd-3-clause","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. 
Returns self adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.regressor/#author-gael-varoquaux-amp103amp97amp101amp108amp46amp118amp97amp114amp111amp113amp117amp97amp117amp120amp64amp110amp111amp114amp109amp97amp108amp101amp115amp117amp112amp46amp111amp114amp103_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"api_subpackages/mlxtend.regressor/#license-bsd-3-clause_1","text":"","title":"License: BSD 3 clause"},{"location":"api_subpackages/mlxtend.regressor/#stackingcvregressor","text":"StackingCVRegressor(regressors, meta_regressor, cv=5, shuffle=True, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A 'Stacking Cross-Validation' regressor for scikit-learn estimators. New in mlxtend v0.7.0 Notes The StackingCVRegressor uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus NumPy's random seed need to be specified explicitely for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVRegressor Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingCVRegressor will fit clones of these original regressors that will be stored in the class attribute self.regr_ . meta_regressor : object The meta-regressor to be fitted on the ensemble of regressor cv : int, cross-validation generator or iterable, optional (default: 5) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - integer, to specify the number of folds in a KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use KFold cross-validation use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. shuffle : bool (default: True) If True, and the cv argument is integer, the training data will be shuffled at fitting stage prior to cross-validation. If the cv argument is a specific cross validation technique, this argument is omitted. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor stored in the self.train_meta_features_ array, which can be accessed after calling fit . refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that are supporting the scikit-learn fit/predict API interface but are not compatible to scikit-learn's clone function. Attributes train_meta_features : numpy array, shape = [n_samples, n_regressors] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingCVRegressor/","title":"StackingCVRegressor"},{"location":"api_subpackages/mlxtend.regressor/#methods_1","text":"fit(X, y, groups=None, sample_weight=None) Fit ensemble regressors and the meta-regressor. 
Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold(). sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises an error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) ** 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) ** 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get an R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples. For some estimators this may be a precomputed kernel matrix instead, shape = (n_samples, n_samples_fitted), where n_samples_fitted is the number of samples used in the fitting for the estimator. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) w.r.t. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"Methods"},
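Since the Notes above state that StackingCVRegressor relies on scikit-learn's check_cv and hence on NumPy's global random state, a minimal usage sketch may be helpful. This is illustrative only; the choice of Ridge/Lasso base regressors, the random-forest meta-regressor, and the diabetes dataset are assumptions, not part of the mlxtend docs:

import numpy as np
from sklearn.datasets import load_diabetes
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Lasso, Ridge
from mlxtend.regressor import StackingCVRegressor

RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)  # seed NumPy globally for deterministic CV splits (see Notes)

X, y = load_diabetes(return_X_y=True)

# two simple base regressors; a random forest serves as the meta-regressor
stack = StackingCVRegressor(regressors=(Ridge(), Lasso()),
                            meta_regressor=RandomForestRegressor(random_state=RANDOM_SEED),
                            cv=5)
stack.fit(X, y)
print(stack.predict(X[:3]))  # predictions for the first three samples, for illustration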
{"location":"api_subpackages/mlxtend.regressor/#stackingregressor","text":"StackingRegressor(regressors, meta_regressor, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A stacking regressor for scikit-learn estimators. Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingRegressor will fit clones of those original regressors that will be stored in the class attribute self.regr_ . meta_regressor : object The meta-regressor to be fitted on the ensemble of regressors. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the regressor being fitted - verbose=2 : Prints info about the parameters of the regressor being fitted - verbose>2 : Changes verbose param of the underlying regressor to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor are stored in the self.train_meta_features_ array, which can be accessed after calling fit . Attributes regr_ : list, shape=[n_regressors] Fitted regressors (clones of the original regressors) meta_regr_ : estimator Fitted meta-regressor (clone of the original meta-estimator) coef_ : array-like, shape = [n_features] Model coefficients of the fitted meta-estimator intercept_ : float Intercept of the fitted meta-estimator train_meta_features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingRegressor/","title":"StackingRegressor"},{"location":"api_subpackages/mlxtend.regressor/#methods_2","text":"fit(X, y, sample_weight=None) Learn weight coefficients from training data for each regressor. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises an error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values.
Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) ** 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) ** 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get an R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples. For some estimators this may be a precomputed kernel matrix instead, shape = (n_samples, n_samples_fitted), where n_samples_fitted is the number of samples used in the fitting for the estimator. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) w.r.t. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"Methods"},{"location":"api_subpackages/mlxtend.regressor/#properties","text":"coef_ None intercept_ None","title":"Properties"},{"location":"api_subpackages/mlxtend.text/","text":"mlxtend version: 0.14.0dev generalize_names generalize_names(name, output_sep=' ', firstname_output_letters=1) Generalize a person's first and last name. Returns a person's name in the format <last_name><separator><firstname letter(s)> (all lowercase) Parameters name : str Name of the player output_sep : str (default: ' ') String for separating last name and first name in the output. firstname_output_letters : int Number of letters in the abbreviated first name. Returns gen_name : str The generalized name. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names/ generalize_names_duplcheck generalize_names_duplcheck(df, col_name) Generalizes names and removes duplicates. Applies mlxtend.text.generalize_names to a DataFrame with 1 first name letter by default and uses more first name letters if duplicates are detected. Parameters df : pandas.DataFrame DataFrame that contains a column where generalize_names should be applied. col_name : str Name of the DataFrame column to which the generalize_names function should be applied. Returns df_new : pandas.DataFrame New DataFrame object where the generalize_names function has been applied without duplicates.
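As a quick illustration of the documented format above, a minimal sketch (the input name is made up, and the commented outputs simply follow the documented <last_name><separator><firstname letter(s)> convention):

from mlxtend.text import generalize_names

# default: last name, separator, one first-name letter -- all lowercase
print(generalize_names('Raschka, Sebastian'))  # 'raschka s'

# 'first last' input order and two first-name letters
print(generalize_names('Sebastian Raschka', firstname_output_letters=2))  # 'raschka se'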
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names_duplcheck/ tokenizer_emoticons tokenizer_emoticons(text) Return emoticons from text. Examples >>> tokenizer_emoticons('This :) is :( a test :-)!') [':)', ':(', ':-)'] For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_emoticons/ tokenizer_words_and_emoticons tokenizer_words_and_emoticons(text) Convert text to lowercase words and emoticons. Examples >>> tokenizer_words_and_emoticons('This :) is :( a test :-)!') ['this', 'is', 'a', 'test', ':)', ':(', ':-)'] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_words_and_emoticons/","title":"Mlxtend.text"},{"location":"api_subpackages/mlxtend.text/#generalize_names","text":"generalize_names(name, output_sep=' ', firstname_output_letters=1) Generalize a person's first and last name. Returns a person's name in the format <last_name><separator><firstname letter(s)> (all lowercase) Parameters name : str Name of the player output_sep : str (default: ' ') String for separating last name and first name in the output. firstname_output_letters : int Number of letters in the abbreviated first name. Returns gen_name : str The generalized name. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names/","title":"generalize_names"},{"location":"api_subpackages/mlxtend.text/#generalize_names_duplcheck","text":"generalize_names_duplcheck(df, col_name) Generalizes names and removes duplicates. Applies mlxtend.text.generalize_names to a DataFrame with 1 first name letter by default and uses more first name letters if duplicates are detected. Parameters df : pandas.DataFrame DataFrame that contains a column where generalize_names should be applied. col_name : str Name of the DataFrame column to which the generalize_names function should be applied. Returns df_new : pandas.DataFrame New DataFrame object where the generalize_names function has been applied without duplicates. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names_duplcheck/","title":"generalize_names_duplcheck"},{"location":"api_subpackages/mlxtend.text/#tokenizer_emoticons","text":"tokenizer_emoticons(text) Return emoticons from text. Examples >>> tokenizer_emoticons('This :) is :( a test :-)!') [':)', ':(', ':-)'] For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_emoticons/","title":"tokenizer_emoticons"},{"location":"api_subpackages/mlxtend.text/#tokenizer_words_and_emoticons","text":"tokenizer_words_and_emoticons(text) Convert text to lowercase words and emoticons. Examples >>> tokenizer_words_and_emoticons('This :) is :( a test :-)!') ['this', 'is', 'a', 'test', ':)', ':(', ':-)'] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_words_and_emoticons/","title":"tokenizer_words_and_emoticons"},{"location":"api_subpackages/mlxtend.utils/","text":"mlxtend version: 0.14.0dev Counter Counter(stderr=False, start_newline=True, precision=0, name=None) Class to display the progress of for-loop iterators. Parameters stderr : bool (default: False) Prints output to sys.stderr if True; uses sys.stdout otherwise. start_newline : bool (default: True) Prepends a new line to the counter, which prevents overwriting counters if multiple counters are printed in succession. precision : int (default: 0) Sets the number of decimal places when displaying the time elapsed in seconds.
name : string (default: None) Prepends the specified name before the counter to allow distinguishing between multiple counters. Attributes curr_iter : int The current iteration. start_time : float The system's time in seconds when the Counter was initialized. end_time : float The system's time in seconds when the Counter was last updated. Examples >>> cnt = Counter() >>> for i in range(20): ... # do some computation ... time.sleep(0.1) ... cnt.update() 20 iter | 2 sec >>> print('The counter was initialized.' ' %d seconds ago.' % (time.time() - cnt.start_time)) The counter was initialized 2 seconds ago >>> print('The counter was last updated' ' %d seconds ago.' % (time.time() - cnt.end_time)) The counter was last updated 0 seconds ago. For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/utils/Counter/ Methods update() Print current iteration and time elapsed. assert_raises assert_raises(exception_type, message, func, *args, **kwargs) Check that an exception is raised with a specific message. Parameters exception_type : exception The exception that should be raised message : str (default: None) The error message that should be raised. Ignored if False or None. func : callable The function that raises the exception *args : positional arguments to func . **kwargs : keyword arguments to func check_Xy check_Xy(X, y, y_int=True) None format_kwarg_dictionaries format_kwarg_dictionaries(default_kwargs=None, user_kwargs=None, protected_keys=None) Function to combine default and user-specified kwargs dictionaries. Parameters default_kwargs : dict, optional Default kwargs (default is None). user_kwargs : dict, optional User-specified kwargs (default is None). protected_keys : array_like, optional Sequence of keys to be removed from the returned dictionary (default is None). Returns formatted_kwargs : dict Formatted kwargs dictionary.","title":"Mlxtend.utils"},{"location":"api_subpackages/mlxtend.utils/#counter","text":"Counter(stderr=False, start_newline=True, precision=0, name=None) Class to display the progress of for-loop iterators. Parameters stderr : bool (default: False) Prints output to sys.stderr if True; uses sys.stdout otherwise. start_newline : bool (default: True) Prepends a new line to the counter, which prevents overwriting counters if multiple counters are printed in succession. precision : int (default: 0) Sets the number of decimal places when displaying the time elapsed in seconds. name : string (default: None) Prepends the specified name before the counter to allow distinguishing between multiple counters. Attributes curr_iter : int The current iteration. start_time : float The system's time in seconds when the Counter was initialized. end_time : float The system's time in seconds when the Counter was last updated. Examples >>> cnt = Counter() >>> for i in range(20): ... # do some computation ... time.sleep(0.1) ... cnt.update() 20 iter | 2 sec >>> print('The counter was initialized.' ' %d seconds ago.' % (time.time() - cnt.start_time)) The counter was initialized 2 seconds ago >>> print('The counter was last updated' ' %d seconds ago.' % (time.time() - cnt.end_time)) The counter was last updated 0 seconds ago.
For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/utils/Counter/","title":"Counter"},{"location":"api_subpackages/mlxtend.utils/#methods","text":"update() Print current iteration and time elapsed.","title":"Methods"},{"location":"api_subpackages/mlxtend.utils/#assert_raises","text":"assert_raises(exception_type, message, func, *args, **kwargs) Check that an exception is raised with a specific message. Parameters exception_type : exception The exception that should be raised message : str (default: None) The error message that should be raised. Ignored if False or None. func : callable The function that raises the exception *args : positional arguments to func . **kwargs : keyword arguments to func","title":"assert_raises"},{"location":"api_subpackages/mlxtend.utils/#check_xy","text":"check_Xy(X, y, y_int=True) None","title":"check_Xy"},{"location":"api_subpackages/mlxtend.utils/#format_kwarg_dictionaries","text":"format_kwarg_dictionaries(default_kwargs=None, user_kwargs=None, protected_keys=None) Function to combine default and user-specified kwargs dictionaries. Parameters default_kwargs : dict, optional Default kwargs (default is None). user_kwargs : dict, optional User-specified kwargs (default is None). protected_keys : array_like, optional Sequence of keys to be removed from the returned dictionary (default is None). Returns formatted_kwargs : dict Formatted kwargs dictionary.","title":"format_kwarg_dictionaries"},{"location":"user_guide/classifier/Adaline/","text":"Adaptive Linear Neuron -- Adaline An implementation of the ADAptive LInear NEuron, Adaline, for binary classification tasks. from mlxtend.classifier import Adaline Overview An illustration of the ADAptive LInear NEuron (Adaline) -- a single-layer artificial linear neuron with a threshold unit: The Adaline classifier is closely related to the Ordinary Least Squares (OLS) Linear Regression algorithm; in OLS regression we find the line (or hyperplane) that minimizes the vertical offsets. In other words, we define the best-fitting line as the line that minimizes the sum of squared errors (SSE) or mean squared error (MSE) between our target variable (y) and our predicted output over all samples i in our dataset of size n . SSE = \\sum_i (\\text{target}^{(i)} - \\text{output}^{(i)})^2 MSE = \\frac{1}{n} \\times SSE LinearRegression implements a linear regression model for performing ordinary least squares regression, and in Adaline, we add a threshold function g(\\cdot) to convert the continuous outcome to a categorical class label: $$y = g({z}) = \\begin{cases} 1 & \\text{if z $\\ge$ 0}\\\\ -1 & \\text{otherwise}. \\end{cases} $$ An Adaline model can be trained by one of the following three approaches: Normal Equations Gradient Descent Stochastic Gradient Descent Normal Equations (closed-form solution) The closed-form solution should be preferred for \"smaller\" datasets where calculating (a \"costly\") matrix inverse is not a concern. For very large datasets, or datasets where the inverse of [X^T X] may not exist (the matrix is non-invertible or singular, e.g., in case of perfect multicollinearity), the gradient descent or stochastic gradient descent approaches are to be preferred. The linear function (linear regression model) is defined as: z = w_0x_0 + w_1x_1 + ... + w_mx_m = \\sum_{j=0}^{m} w_j x_j = \\mathbf{w}^T\\mathbf{x} where z is the predicted response, \\mathbf{x} is an m -dimensional sample vector, and \\mathbf{w} is the weight vector (vector of coefficients).
Note that w_0 represents the y-axis intercept of the model and therefore x_0=1 . Using the closed-form solution (normal equation), we compute the weights of the model as follows: \\mathbf{w} = (\\mathbf{X}^T\\mathbf{X})^{-1}\\mathbf{X}^Ty Gradient Descent (GD) and Stochastic Gradient Descent (SGD) In the current implementation, the Adaline model is learned via Gradient Descent or Stochastic Gradient Descent. See Gradient Descent and Stochastic Gradient Descent and Deriving the Gradient Descent Rule for Linear Regression and Adaline for details. Random shuffling is implemented as: for one or more epochs randomly shuffle samples in the training set for training sample i compute gradients and perform weight updates References B. Widrow, M. E. Hoff, et al. Adaptive switching circuits . 1960. Example 1 - Closed Form Solution from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Adaline import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() ada = Adaline(epochs=30, eta=0.01, minibatches=None, random_seed=1) ada.fit(X, y) plot_decision_regions(X, y, clf=ada) plt.title('Adaline - Stochastic Gradient Descent') plt.show() Example 2 - Gradient Descent from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Adaline import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() ada = Adaline(epochs=30, eta=0.01, minibatches=1, # for Gradient Descent Learning random_seed=1, print_progress=3) ada.fit(X, y) plot_decision_regions(X, y, clf=ada) plt.title('Adaline - Stochastic Gradient Descent') plt.show() plt.plot(range(len(ada.cost_)), ada.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') Iteration: 30/30 | Cost 3.79 | Elapsed: 0:00:00 | ETA: 0:00:00 Example 3 - Stochastic Gradient Descent from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Adaline import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() ada = Adaline(epochs=15, eta=0.02, minibatches=len(y), # for SGD learning random_seed=1, print_progress=3) ada.fit(X, y) plot_decision_regions(X, y, clf=ada) plt.title('Adaline - Stochastic Gradient Descent') plt.show() plt.plot(range(len(ada.cost_)), ada.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 15/15 | Cost 3.81 | Elapsed: 0:00:00 | ETA: 0:00:00 Example 4 - Stochastic Gradient Descent with Minibatches from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Adaline import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - 
X[:,1].mean()) / X[:,1].std() ada = Adaline(epochs=15, eta=0.02, minibatches=5, # for SGD learning w. minibatch size 20 random_seed=1, print_progress=3) ada.fit(X, y) plot_decision_regions(X, y, clf=ada) plt.title('Adaline - Stochastic Gradient Descent') plt.show() plt.plot(range(len(ada.cost_)), ada.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 15/15 | Cost 3.87 | Elapsed: 0:00:00 | ETA: 0:00:00 API Adaline(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) ADAptive LInear NEuron classifier. Note that this implementation of Adaline expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) solver rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Adaline/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Adaptive Linear Neuron -- Adaline"},{"location":"user_guide/classifier/Adaline/#adaptive-linear-neuron-adaline","text":"An implementation of the ADAptive LInear NEuron, Adaline, for binary classification tasks. 
from mlxtend.classifier import Adaline","title":"Adaptive Linear Neuron -- Adaline"},{"location":"user_guide/classifier/Adaline/#overview","text":"An illustration of the ADAptive LInear NEuron (Adaline) -- a single-layer artificial linear neuron with a threshold unit: The Adaline classifier is closely related to the Ordinary Least Squares (OLS) Linear Regression algorithm; in OLS regression we find the line (or hyperplane) that minimizes the vertical offsets. Or in other words, we define the best-fitting line as the line that minimizes the sum of squared errors (SSE) or mean squared error (MSE) between our target variable (y) and our predicted output over all samples i in our dataset of size n . SSE = \\sum_i (\\text{target}^{(i)} - \\text{output}^{(i)})^2 MSE = \\frac{1}{n} \\times SSE LinearRegression implements a linear regression model for performing ordinary least squares regression, and in Adaline, we add a threshold function g(\\cdot) to convert the continuous outcome to a categorical class label: $$y = g({z}) = \\begin{cases} 1 & \\text{if z $\\ge$ 0}\\\\ -1 & \\text{otherwise}. \\end{cases} $$ An Adaline model can be trained by one of the following three approaches: Normal Equations Gradient Descent Stochastic Gradient Descent","title":"Overview"},{"location":"user_guide/classifier/Adaline/#normal-equations-closed-form-solution","text":"The closed-form solution should be preferred for \"smaller\" datasets where calculating (a \"costly\") matrix inverse is not a concern. For very large datasets, or datasets where the inverse of [X^T X] may not exist (the matrix is non-invertible or singular, e.g., in case of perfect multicollinearity), the gradient descent or stochastic gradient descent approaches are to be preferred. The linear function (linear regression model) is defined as: z = w_0x_0 + w_1x_1 + ... + w_mx_m = \\sum_{j=0}^{m} w_j x_j = \\mathbf{w}^T\\mathbf{x} where y is the response variable, \\mathbf{x} is an m -dimensional sample vector, and \\mathbf{w} is the weight vector (vector of coefficients). Note that w_0 represents the y-axis intercept of the model and therefore x_0=1 . Using the closed-form solution (normal equation), we compute the weights of the model as follows: \\mathbf{w} = (\\mathbf{X}^T\\mathbf{X})^{-1}\\mathbf{X}^Ty","title":"Normal Equations (closed-form solution)"},{"location":"user_guide/classifier/Adaline/#gradient-descent-gd-and-stochastic-gradient-descent-sgd","text":"In the current implementation, the Adaline model is learned via Gradient Descent or Stochastic Gradient Descent. See Gradient Descent and Stochastic Gradient Descent and Deriving the Gradient Descent Rule for Linear Regression and Adaline for details. Random shuffling is implemented as: for one or more epochs randomly shuffle samples in the training set for training sample i compute gradients and perform weight updates","title":"Gradient Descent (GD) and Stochastic Gradient Descent (SGD)"},{"location":"user_guide/classifier/Adaline/#references","text":"B. Widrow, M. E. Hoff, et al. Adaptive switching circuits . 
1960.","title":"References"},{"location":"user_guide/classifier/Adaline/#example-1-closed-form-solution","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Adaline import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() ada = Adaline(epochs=30, eta=0.01, minibatches=None, random_seed=1) ada.fit(X, y) plot_decision_regions(X, y, clf=ada) plt.title('Adaline - Stochastic Gradient Descent') plt.show()","title":"Example 1 - Closed Form Solution"},{"location":"user_guide/classifier/Adaline/#example-2-gradient-descent","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Adaline import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() ada = Adaline(epochs=30, eta=0.01, minibatches=1, # for Gradient Descent Learning random_seed=1, print_progress=3) ada.fit(X, y) plot_decision_regions(X, y, clf=ada) plt.title('Adaline - Stochastic Gradient Descent') plt.show() plt.plot(range(len(ada.cost_)), ada.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') Iteration: 30/30 | Cost 3.79 | Elapsed: 0:00:00 | ETA: 0:00:00 ","title":"Example 2 - Gradient Descent"},{"location":"user_guide/classifier/Adaline/#example-3-stochastic-gradient-descent","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Adaline import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() ada = Adaline(epochs=15, eta=0.02, minibatches=len(y), # for SGD learning random_seed=1, print_progress=3) ada.fit(X, y) plot_decision_regions(X, y, clf=ada) plt.title('Adaline - Stochastic Gradient Descent') plt.show() plt.plot(range(len(ada.cost_)), ada.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 15/15 | Cost 3.81 | Elapsed: 0:00:00 | ETA: 0:00:00","title":"Example 3 - Stochastic Gradient Descent"},{"location":"user_guide/classifier/Adaline/#example-4-stochastic-gradient-descent-with-minibatches","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Adaline import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() ada = Adaline(epochs=15, eta=0.02, minibatches=5, # for SGD learning w. 
minibatch size 20 random_seed=1, print_progress=3) ada.fit(X, y) plot_decision_regions(X, y, clf=ada) plt.title('Adaline - Stochastic Gradient Descent') plt.show() plt.plot(range(len(ada.cost_)), ada.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 15/15 | Cost 3.87 | Elapsed: 0:00:00 | ETA: 0:00:00","title":"Example 4 - Stochastic Gradient Descent with Minibatches"},{"location":"user_guide/classifier/Adaline/#api","text":"Adaline(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) ADAptive LInear NEuron classifier. Note that this implementation of Adaline expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) solver rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Adaline/","title":"API"},{"location":"user_guide/classifier/Adaline/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Methods"},{"location":"user_guide/classifier/EnsembleVoteClassifier/","text":"EnsembleVoteClassifier Implementation of a majority voting EnsembleVoteClassifier for classification. from mlxtend.classifier import EnsembleVoteClassifier Overview The EnsembleVoteClassifier is a meta-classifier for combining similar or conceptually different machine learning classifiers for classification via majority or plurality voting. (For simplicity, we will refer to both majority and plurality voting as majority voting.) 
The EnsembleVoteClassifier implements \"hard\" and \"soft\" voting. In hard voting, we predict the final class label as the class label that has been predicted most frequently by the classification models. In soft voting, we predict the class labels by averaging the class-probabilities (only recommended if the classifiers are well-calibrated). Note If you are interested in using the EnsembleVoteClassifier , please note that it is now also available through scikit-learn (>=0.17) as VotingClassifier . Majority Voting / Hard Voting Hard voting is the simplest case of majority voting. Here, we predict the class label \\hat{y} via majority (plurality) voting of each classifier C_j : \\hat{y}=mode\\{C_1(\\mathbf{x}), C_2(\\mathbf{x}), ..., C_m(\\mathbf{x})\\} Assuming that we combine three classifiers that classify a training sample as follows: classifier 1 -> class 0 classifier 2 -> class 0 classifier 3 -> class 1 \\hat{y}=mode\\{0, 0, 1\\} = 0 Via majority vote, we would classify the sample as \"class 0.\" Weighted Majority Vote In addition to the simple majority vote (hard voting) as described in the previous section, we can compute a weighted majority vote by associating a weight w_j with classifier C_j : \\hat{y} = \\arg \\max_i \\sum^{m}_{j=1} w_j \\chi_A \\big(C_j(\\mathbf{x})=i\\big), where \\chi_A is the characteristic function [C_j(\\mathbf{x}) = i \\; \\in A] , and A is the set of unique class labels. Continuing with the example from the previous section classifier 1 -> class 0 classifier 2 -> class 0 classifier 3 -> class 1 assigning the weights {0.2, 0.2, 0.6} would yield a prediction \\hat{y} = 1 : \\arg \\max_i [0.2 \\times i_0 + 0.2 \\times i_0 + 0.6 \\times i_1] = 1 Soft Voting In soft voting, we predict the class labels based on the predicted probabilities p of each classifier -- this approach is only recommended if the classifiers are well-calibrated. \\hat{y} = \\arg \\max_i \\sum^{m}_{j=1} w_j p_{ij}, where w_j is the weight that can be assigned to the j th classifier. Assuming the example in the previous section was a binary classification task with class labels i \\in \\{0, 1\\} , our ensemble could make the following prediction: C_1(\\mathbf{x}) \\rightarrow [0.9, 0.1] C_2(\\mathbf{x}) \\rightarrow [0.8, 0.2] C_3(\\mathbf{x}) \\rightarrow [0.4, 0.6] Using uniform weights, we compute the average probabilities: p(i_0 \\mid \\mathbf{x}) = \\frac{0.9 + 0.8 + 0.4}{3} = 0.7 \\\\\\\\ p(i_1 \\mid \\mathbf{x}) = \\frac{0.1 + 0.2 + 0.6}{3} = 0.3 \\hat{y} = \\arg \\max_i \\big[p(i_0 \\mid \\mathbf{x}), p(i_1 \\mid \\mathbf{x}) \\big] = 0 However, assigning the weights {0.1, 0.1, 0.8} would yield a prediction \\hat{y} = 1 : p(i_0 \\mid \\mathbf{x}) = {0.1 \\times 0.9 + 0.1 \\times 0.8 + 0.8 \\times 0.4} = 0.49 \\\\\\\\ p(i_1 \\mid \\mathbf{x}) = {0.1 \\times 0.1 + 0.1 \\times 0.2 + 0.8 \\times 0.6} = 0.51 \\hat{y} = \\arg \\max_i \\big[p(i_0 \\mid \\mathbf{x}), p(i_1 \\mid \\mathbf{x}) \\big] = 1 References [1] S. Raschka. Python Machine Learning . Packt Publishing Ltd., 2015.
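The weighted soft-voting arithmetic above is easy to verify numerically. The following is a minimal, illustrative NumPy sketch (not part of the original docs) that reuses the probabilities and weights from the example:

import numpy as np

# per-classifier class-membership probabilities, columns = [p(class 0), p(class 1)]
probas = np.array([[0.9, 0.1],
                   [0.8, 0.2],
                   [0.4, 0.6]])
weights = np.array([0.1, 0.1, 0.8])

# weighted average across the three classifiers (the weights sum to 1 here)
avg = np.average(probas, axis=0, weights=weights)
print(avg)           # [0.49 0.51]
print(avg.argmax())  # 1, i.e., the predicted class label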
Example 1 - Classifying Iris Flowers Using Different Classification Models from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target from sklearn import model_selection from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier import numpy as np clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() print('5-fold cross validation:\\n') labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes'] for clf, label in zip([clf1, clf2, clf3], labels): scores = model_selection.cross_val_score(clf, X, y, cv=5, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 5-fold cross validation: Accuracy: 0.90 (+/- 0.05) [Logistic Regression] Accuracy: 0.93 (+/- 0.05) [Random Forest] Accuracy: 0.91 (+/- 0.04) [Naive Bayes] from mlxtend.classifier import EnsembleVoteClassifier eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], weights=[1,1,1]) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'Ensemble'] for clf, label in zip([clf1, clf2, clf3, eclf], labels): scores = model_selection.cross_val_score(clf, X, y, cv=5, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) Accuracy: 0.90 (+/- 0.05) [Logistic Regression] Accuracy: 0.93 (+/- 0.05) [Random Forest] Accuracy: 0.91 (+/- 0.04) [Naive Bayes] Accuracy: 0.95 (+/- 0.05) [Ensemble] Plotting Decision Regions import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions import matplotlib.gridspec as gridspec import itertools gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'Ensemble'] for clf, lab, grd in zip([clf1, clf2, clf3, eclf], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf) plt.title(lab) Example 2 - Grid Search from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import EnsembleVoteClassifier clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft') params = {'logisticregression__C': [1.0, 100.0], 'randomforestclassifier__n_estimators': [20, 200],} grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5) grid.fit(iris.data, iris.target) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) 0.953 +/- 0.01
{'logisticregression__C': 1.0, 'randomforestclassifier__n_estimators': 20} 0.960 +/- 0.01 {'logisticregression__C': 1.0, 'randomforestclassifier__n_estimators': 200} 0.960 +/- 0.01 {'logisticregression__C': 100.0, 'randomforestclassifier__n_estimators': 20} 0.953 +/- 0.02 {'logisticregression__C': 100.0, 'randomforestclassifier__n_estimators': 200} Note : If the EnsembleVoteClassifier is initialized with multiple similar estimator objects, the estimator names are modified with consecutive integer indices, for example: clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) eclf = EnsembleVoteClassifier(clfs=[clf1, clf1, clf2], voting='soft') params = {'logisticregression-1__C': [1.0, 100.0], 'logisticregression-2__C': [1.0, 100.0], 'randomforestclassifier__n_estimators': [20, 200],} grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5) grid = grid.fit(iris.data, iris.target) Note The EnsembleVoteClassifier also enables grid search over the clfs argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both different classifiers and classifier parameters at the same time. For instance, while the following parameter dictionary works params = {'randomforestclassifier__n_estimators': [1, 100], 'clfs': [(clf1, clf1, clf1), (clf2, clf3)]} it will use the instance settings of clf1 , clf2 , and clf3 and not overwrite them with the 'n_estimators' settings from 'randomforestclassifier__n_estimators': [1, 100] . Example 3 - Majority voting with classifiers trained on different feature subsets Feature selection algorithms implemented in scikit-learn as well as the SequentialFeatureSelector implement a transform method that passes the reduced feature subset to the next item in a Pipeline . For example, the method def transform(self, X): return X[:, self.k_feature_idx_] returns the best feature columns, k_feature_idx_ , given a dataset X. Thus, we simply need to construct a Pipeline consisting of the feature selector and the classifier in order to select different feature subsets for different algorithms. During fitting , the optimal feature subsets are automatically determined via the GridSearchCV object, and by calling predict , the fitted feature selector in the pipeline passes along only the columns that yielded the best performance for the respective classifier.
from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, :], iris.target from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import EnsembleVoteClassifier from sklearn.pipeline import Pipeline from mlxtend.feature_selection import SequentialFeatureSelector clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() # Creating a feature-selection-classifier pipeline sfs1 = SequentialFeatureSelector(clf1, k_features=4, forward=True, floating=False, scoring='accuracy', verbose=0, cv=0) clf1_pipe = Pipeline([('sfs', sfs1), ('logreg', clf1)]) eclf = EnsembleVoteClassifier(clfs=[clf1_pipe, clf2, clf3], voting='soft') params = {'pipeline__sfs__k_features': [1, 2, 3], 'pipeline__logreg__C': [1.0, 100.0], 'randomforestclassifier__n_estimators': [20, 200]} grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5) grid.fit(iris.data, iris.target) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) 0.953 +/- 0.01 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 1, 'randomforestclassifier__n_estimators': 20} 0.947 +/- 0.02 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 1, 'randomforestclassifier__n_estimators': 200} 0.953 +/- 0.01 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 2, 'randomforestclassifier__n_estimators': 20} 0.947 +/- 0.02 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 2, 'randomforestclassifier__n_estimators': 200} 0.953 +/- 0.01 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 20} 0.953 +/- 0.02 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 200} 0.947 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 1, 'randomforestclassifier__n_estimators': 20} 0.953 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 1, 'randomforestclassifier__n_estimators': 200} 0.947 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 2, 'randomforestclassifier__n_estimators': 20} 0.947 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 2, 'randomforestclassifier__n_estimators': 200} 0.960 +/- 0.01 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 20} 0.953 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 200} The best parameters determined via GridSearch are: grid.best_params_ {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 20} Now, we assign these parameters to the ensemble voting classifier, fit the models on the complete training set, and perform a prediction on 3 samples from the Iris dataset. eclf = eclf.set_params(**grid.best_params_) eclf.fit(X, y).predict(X[[1, 51, 149]]) array([0, 1, 2]) Manual Approach Alternatively, we can select different columns \"manually\" using the ColumnSelector object. In this example, we select only the first (sepal length) and third (petal length) column for the logistic regression classifier ( clf1 ). 
from mlxtend.feature_selection import ColumnSelector col_sel = ColumnSelector(cols=[0, 2]) clf1_pipe = Pipeline([('sel', col_sel), ('logreg', clf1)]) eclf = EnsembleVoteClassifier(clfs=[clf1_pipe, clf2, clf3], voting='soft') eclf.fit(X, y).predict(X[[1, 51, 149]]) array([0, 1, 2]) Furthermore, we can fit the SequentialFeatureSelector separately, outside the grid search hyperparameter optimization pipeline. Here, we determine the best features first, and then we construct a pipeline using these \"fixed,\" best features as the seed for the ColumnSelector : sfs1 = SequentialFeatureSelector(clf1, k_features=2, forward=True, floating=False, scoring='accuracy', verbose=1, cv=0) sfs1.fit(X, y) print('Best features', sfs1.k_feature_idx_) col_sel = ColumnSelector(cols=sfs1.k_feature_idx_) clf1_pipe = Pipeline([('sel', col_sel), ('logreg', clf1)]) [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished Features: 1/2[Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished Features: 2/2 Best features (0, 2) eclf = EnsembleVoteClassifier(clfs=[clf1_pipe, clf2, clf3], voting='soft') eclf.fit(X, y).predict(X[[1, 51, 149]]) array([0, 1, 2]) Example 5 - Using Pre-fitted Classifiers from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target Assume that we previously fitted our classifiers: from sklearn import model_selection from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier import numpy as np clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() for clf in (clf1, clf2, clf3): clf.fit(X, y) By setting refit=False , the EnsembleVoteClassifier will not re-fit these classifiers to save computational time: from mlxtend.classifier import EnsembleVoteClassifier import copy eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], weights=[1,1,1], refit=False) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'Ensemble'] eclf.fit(X, y) print('accuracy:', np.mean(y == eclf.predict(X))) accuracy: 0.973333333333 However, please note that refit=False is incompatible with any form of cross-validation, e.g., model_selection.cross_val_score or model_selection.GridSearchCV , since it would require the classifiers to be refit to the training folds. Thus, only use refit=False if you want to make a prediction directly without cross-validation. Example 6 - Ensembles of Classifiers that Operate on Different Feature Subsets If desired, the different classifiers can be fit to different subsets of features in the training dataset.
The following example illustrates how this can be done on a technical level using scikit-learn pipelines and the ColumnSelector : from sklearn.datasets import load_iris from mlxtend.classifier import EnsembleVoteClassifier from mlxtend.feature_selection import ColumnSelector from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression iris = load_iris() X = iris.data y = iris.target pipe1 = make_pipeline(ColumnSelector(cols=(0, 2)), LogisticRegression()) pipe2 = make_pipeline(ColumnSelector(cols=(1, 2, 3)), LogisticRegression()) eclf = EnsembleVoteClassifier(clfs=[pipe1, pipe2]) eclf.fit(X, y) EnsembleVoteClassifier(clfs=[Pipeline(memory=None, steps=[('columnselector', ColumnSelector(cols=(0, 2), drop_axis=False)), ('logisticregression', LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l...='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False))])], refit=True, verbose=0, voting='hard', weights=None) Example 7 - A Note about Scikit-Learn SVMs and Soft Voting This section provides some additional technical insights in how probabilities are used when voting='soft' . Note that scikit-learn estimates the probabilities for SVMs (more info here: http://scikit-learn.org/stable/modules/svm.html#scores-probabilities) in a way that these may not be consistent with the class labels that the SVM predicts. This is an extreme example, but let's say we have a dataset with 3 class labels, 0, 1, and 2. For a given training example, the SVM classifier may predict class 2. However, the class-membership probabilities may look as follows: class 0: 99% class 1: 0.5% class 2: 0.5% A practical example of this scenario is shown below: import numpy as np from mlxtend.classifier import EnsembleVoteClassifier from sklearn.svm import SVC from sklearn.datasets import load_iris iris = load_iris() X, y = iris.data, iris.target clf2 = SVC(probability=True, random_state=4) clf2.fit(X, y) eclf = EnsembleVoteClassifier(clfs=[clf2], voting='soft', refit=False) eclf.fit(X, y) for svm_class, e_class, svm_prob, e_prob, in zip(clf2.predict(X), eclf.predict(X), clf2.predict_proba(X), eclf.predict_proba(X)): if svm_class != e_class: print('============') print('Probas from SVM :', svm_prob) print('Class from SVM :', svm_class) print('Probas from SVM in Ensemble:', e_prob) print('Class from SVM in Ensemble :', e_class) print('============') ============ Probas from SVM : [ 0.01192489 0.47662663 0.51144848] Class from SVM : 1 Probas from SVM in Ensemble: [ 0.01192489 0.47662663 0.51144848] Class from SVM in Ensemble : 2 ============ Based on the probabilities, we would expect the SVM to predict class 2, because it has the highest probability. Since the EnsembleVoteClassifier uses the argmax function internally if voting='soft' , it would indeed predict class 2 in this case even if the ensemble consists of only one SVM model. Note that in practice, this minor technical detail does not need to concern you, but it is useful to keep it in mind in case you are wondering about results from a 1-model SVM ensemble compared to that SVM alone -- this is not a bug. API EnsembleVoteClassifier(clfs, voting='hard', weights=None, verbose=0, refit=True) Soft Voting/Majority Rule classifier for scikit-learn estimators. Parameters clfs : array-like, shape = [n_classifiers] A list of classifiers. 
Invoking the fit method on the EnsembleVoteClassifier will fit clones of those original classifiers that will be stored in the class attribute self.clfs_ if refit=True (default). voting : str, {'hard', 'soft'} (default='hard') If 'hard', uses predicted class labels for majority rule voting. Else if 'soft', predicts the class label based on the argmax of the sums of the predicted probabilities, which is recommended for an ensemble of well-calibrated classifiers. weights : array-like, shape = [n_classifiers], optional (default= None ) Sequence of weights ( float or int ) to weight the occurrences of predicted class labels ( hard voting) or class probabilities before averaging ( soft voting). Uses uniform weights if None . verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the clf being fitted - verbose=2 : Prints info about the parameters of the clf being fitted - verbose>2 : Changes verbose param of the underlying clf to self.verbose - 2 refit : bool (default: True) Refits classifiers in clfs if True; uses references to the clfs , otherwise (assumes that the classifiers were already fit). Note: refit=False is incompatible with most scikit-learn wrappers! For instance, if any form of cross-validation is performed, this would require re-fitting the classifiers to the training folds, which would raise a NotFittedError if refit=False. (New in mlxtend v0.6.) Attributes classes_ : array-like, shape = [n_predictions] clf : array-like, shape = [n_predictions] The unmodified input classifiers clf_ : array-like, shape = [n_predictions] Fitted clones of the input classifiers Examples >>> import numpy as np >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.ensemble import RandomForestClassifier >>> from mlxtend.classifier import EnsembleVoteClassifier >>> clf1 = LogisticRegression(random_state=1) >>> clf2 = RandomForestClassifier(random_state=1) >>> clf3 = GaussianNB() >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> eclf1 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='hard', verbose=1) >>> eclf1 = eclf1.fit(X, y) >>> print(eclf1.predict(X)) [1 1 1 2 2 2] >>> eclf2 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft') >>> eclf2 = eclf2.fit(X, y) >>> print(eclf2.predict(X)) [1 1 1 2 2 2] >>> eclf3 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='soft', weights=[2,1,1]) >>> eclf3 = eclf3.fit(X, y) >>> print(eclf3.predict(X)) [1 1 1 2 2 2] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/EnsembleVoteClassifier/ Methods fit(X, y, sample_weight=None) Learn weight coefficients from training data for each classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each classifier in the clfs list. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X.
Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict class labels for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns maj : array-like, shape = [n_samples] Predicted class labels. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns avg : array-like, shape = [n_samples, n_classes] Weighted average probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Return class labels or probabilities for X for each estimator. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns If voting='soft' : array-like = [n_classifiers, n_samples, n_classes] Class probabilities calculated by each classifier. If voting='hard' : array-like = [n_classifiers, n_samples] Class labels predicted by each classifier.","title":"EnsembleVoteClassifier"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#ensemblevoteclassifier","text":"Implementation of a majority voting EnsembleVoteClassifier for classification. from mlxtend.classifier import EnsembleVoteClassifier","title":"EnsembleVoteClassifier"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#overview","text":"The EnsembleVoteClassifier is a meta-classifier for combining similar or conceptually different machine learning classifiers for classification via majority or plurality voting. (For simplicity, we will refer to both majority and plurality voting as majority voting.) The EnsembleVoteClassifier implements \"hard\" and \"soft\" voting. In hard voting, we predict the final class label as the class label that has been predicted most frequently by the classification models. In soft voting, we predict the class labels by averaging the class-probabilities (only recommended if the classifiers are well-calibrated). Note If you are interested in using the EnsembleVoteClassifier , please note that it is now also available through scikit-learn (>0.17) as VotingClassifier .","title":"Overview"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#majority-voting-hard-voting","text":"Hard voting is the simplest case of majority voting.
Here, we predict the class label \\hat{y} via majority (plurality) voting of each classifier C_j : \\hat{y}=mode\\{C_1(\\mathbf{x}), C_2(\\mathbf{x}), ..., C_m(\\mathbf{x})\\} Assuming that we combine three classifiers that classify a training sample as follows: classifier 1 -> class 0 classifier 2 -> class 0 classifier 3 -> class 1 \\hat{y}=mode\\{0, 0, 1\\} = 0 Via majority vote, we would we would classify the sample as \"class 0.\"","title":"Majority Voting / Hard Voting"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#weighted-majority-vote","text":"In addition to the simple majority vote (hard voting) as described in the previous section, we can compute a weighted majority vote by associating a weight w_j with classifier C_j : \\hat{y} = \\arg \\max_i \\sum^{m}_{j=1} w_j \\chi_A \\big(C_j(\\mathbf{x})=i\\big), where \\chi_A is the characteristic function [C_j(\\mathbf{x}) = i \\; \\in A] , and A is the set of unique class labels. Continuing with the example from the previous section classifier 1 -> class 0 classifier 2 -> class 0 classifier 3 -> class 1 assigning the weights {0.2, 0.2, 0.6} would yield a prediction \\hat{y} = 1 : \\arg \\max_i [0.2 \\times i_0 + 0.2 \\times i_0 + 0.6 \\times i_1] = 1","title":"Weighted Majority Vote"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#soft-voting","text":"In soft voting, we predict the class labels based on the predicted probabilities p for classifier -- this approach is only recommended if the classifiers are well-calibrated. \\hat{y} = \\arg \\max_i \\sum^{m}_{j=1} w_j p_{ij}, where w_j is the weight that can be assigned to the j th classifier. Assuming the example in the previous section was a binary classification task with class labels i \\in \\{0, 1\\} , our ensemble could make the following prediction: C_1(\\mathbf{x}) \\rightarrow [0.9, 0.1] C_2(\\mathbf{x}) \\rightarrow [0.8, 0.2] C_3(\\mathbf{x}) \\rightarrow [0.4, 0.6] Using uniform weights, we compute the average probabilities: p(i_0 \\mid \\mathbf{x}) = \\frac{0.9 + 0.8 + 0.4}{3} = 0.7 \\\\\\\\ p(i_1 \\mid \\mathbf{x}) = \\frac{0.1 + 0.2 + 0.6}{3} = 0.3 \\hat{y} = \\arg \\max_i \\big[p(i_0 \\mid \\mathbf{x}), p(i_1 \\mid \\mathbf{x}) \\big] = 0 However, assigning the weights {0.1, 0.1, 0.8} would yield a prediction \\hat{y} = 1 : p(i_0 \\mid \\mathbf{x}) = {0.1 \\times 0.9 + 0.1 \\times 0.8 + 0.8 \\times 0.4} = 0.49 \\\\\\\\ p(i_1 \\mid \\mathbf{x}) = {0.1 \\times 0.1 + 0.2 \\times 0.1 + 0.8 \\times 0.6} = 0.51 \\hat{y} = \\arg \\max_i \\big[p(i_0 \\mid \\mathbf{x}), p(i_1 \\mid \\mathbf{x}) \\big] = 1","title":"Soft Voting"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#references","text":"[1] S. Raschka. Python Machine Learning . 
","title":"Soft Voting"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#references","text":"[1] S. Raschka. Python Machine Learning . Packt Publishing Ltd., 2015.","title":"References"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#example-1-classifying-iris-flowers-using-different-classification-models","text":"from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target from sklearn import model_selection from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier import numpy as np clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() print('5-fold cross validation:\n') labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes'] for clf, label in zip([clf1, clf2, clf3], labels): scores = model_selection.cross_val_score(clf, X, y, cv=5, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 5-fold cross validation: Accuracy: 0.90 (+/- 0.05) [Logistic Regression] Accuracy: 0.93 (+/- 0.05) [Random Forest] Accuracy: 0.91 (+/- 0.04) [Naive Bayes] from mlxtend.classifier import EnsembleVoteClassifier eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], weights=[1,1,1]) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'Ensemble'] for clf, label in zip([clf1, clf2, clf3, eclf], labels): scores = model_selection.cross_val_score(clf, X, y, cv=5, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) Accuracy: 0.90 (+/- 0.05) [Logistic Regression] Accuracy: 0.93 (+/- 0.05) [Random Forest] Accuracy: 0.91 (+/- 0.04) [Naive Bayes] Accuracy: 0.95 (+/- 0.05) [Ensemble]","title":"Example 1 - Classifying Iris Flowers Using Different Classification Models"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#plotting-decision-regions","text":"import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions import matplotlib.gridspec as gridspec import itertools gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'Ensemble'] for clf, lab, grd in zip([clf1, clf2, clf3, eclf], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf) plt.title(lab)","title":"Plotting Decision Regions"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#example-2-grid-search","text":"from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import EnsembleVoteClassifier clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft') params = {'logisticregression__C': [1.0, 100.0], 'randomforestclassifier__n_estimators':
[20, 200],} grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5) grid.fit(iris.data, iris.target) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) 0.953 +/- 0.01 {'logisticregression__C': 1.0, 'randomforestclassifier__n_estimators': 20} 0.960 +/- 0.01 {'logisticregression__C': 1.0, 'randomforestclassifier__n_estimators': 200} 0.960 +/- 0.01 {'logisticregression__C': 100.0, 'randomforestclassifier__n_estimators': 20} 0.953 +/- 0.02 {'logisticregression__C': 100.0, 'randomforestclassifier__n_estimators': 200} Note : If the EnsembleVoteClassifier is initialized with multiple similar estimator objects, the estimator names are modified with consecutive integer indices, for example: clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) eclf = EnsembleVoteClassifier(clfs=[clf1, clf1, clf2], voting='soft') params = {'logisticregression-1__C': [1.0, 100.0], 'logisticregression-2__C': [1.0, 100.0], 'randomforestclassifier__n_estimators': [20, 200],} grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5) grid = grid.fit(iris.data, iris.target) Note The EnsembleVoteClassifier also enables grid search over the clfs argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both different classifiers and classifier parameters at the same time. For instance, while the following parameter dictionary works params = {'randomforestclassifier__n_estimators': [1, 100], 'clfs': [(clf1, clf1, clf1), (clf2, clf3)]} it will use the instance settings of clf1 , clf2 , and clf3 and not overwrite them with the 'n_estimators' settings from 'randomforestclassifier__n_estimators': [1, 100] .","title":"Example 2 - Grid Search"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#example-3-majority-voting-with-classifiers-trained-on-different-feature-subsets","text":"Feature selection algorithms implemented in scikit-learn as well as the SequentialFeatureSelector implement a transform method that passes the reduced feature subset to the next item in a Pipeline . For example, the method def transform(self, X): return X[:, self.k_feature_idx_] returns the best feature columns, k_feature_idx_ , given a dataset X. Thus, we simply need to construct a Pipeline consisting of the feature selector and the classifier in order to select different feature subsets for different algorithms. During fitting , the optimal feature subsets are automatically determined via the GridSearchCV object, and when calling predict , the fitted feature selector in the pipeline passes along only those columns that yielded the best performance for the respective classifier.
from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, :], iris.target from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import EnsembleVoteClassifier from sklearn.pipeline import Pipeline from mlxtend.feature_selection import SequentialFeatureSelector clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() # Creating a feature-selection-classifier pipeline sfs1 = SequentialFeatureSelector(clf1, k_features=4, forward=True, floating=False, scoring='accuracy', verbose=0, cv=0) clf1_pipe = Pipeline([('sfs', sfs1), ('logreg', clf1)]) eclf = EnsembleVoteClassifier(clfs=[clf1_pipe, clf2, clf3], voting='soft') params = {'pipeline__sfs__k_features': [1, 2, 3], 'pipeline__logreg__C': [1.0, 100.0], 'randomforestclassifier__n_estimators': [20, 200]} grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5) grid.fit(iris.data, iris.target) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) 0.953 +/- 0.01 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 1, 'randomforestclassifier__n_estimators': 20} 0.947 +/- 0.02 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 1, 'randomforestclassifier__n_estimators': 200} 0.953 +/- 0.01 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 2, 'randomforestclassifier__n_estimators': 20} 0.947 +/- 0.02 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 2, 'randomforestclassifier__n_estimators': 200} 0.953 +/- 0.01 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 20} 0.953 +/- 0.02 {'pipeline__logreg__C': 1.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 200} 0.947 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 1, 'randomforestclassifier__n_estimators': 20} 0.953 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 1, 'randomforestclassifier__n_estimators': 200} 0.947 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 2, 'randomforestclassifier__n_estimators': 20} 0.947 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 2, 'randomforestclassifier__n_estimators': 200} 0.960 +/- 0.01 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 20} 0.953 +/- 0.02 {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 200} The best parameters determined via GridSearch are: grid.best_params_ {'pipeline__logreg__C': 100.0, 'pipeline__sfs__k_features': 3, 'randomforestclassifier__n_estimators': 20} Now, we assign these parameters to the ensemble voting classifier, fit the models on the complete training set, and perform a prediction on 3 samples from the Iris dataset. eclf = eclf.set_params(**grid.best_params_) eclf.fit(X, y).predict(X[[1, 51, 149]]) array([0, 1, 2])","title":"Example 3 - Majority voting with classifiers trained on different feature subsets"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#manual-approach","text":"Alternatively, we can select different columns \"manually\" using the ColumnSelector object. 
In this example, we select only the first (sepal length) and third (petal length) column for the logistic regression classifier ( clf1 ). from mlxtend.feature_selection import ColumnSelector col_sel = ColumnSelector(cols=[0, 2]) clf1_pipe = Pipeline([('sel', col_sel), ('logreg', clf1)]) eclf = EnsembleVoteClassifier(clfs=[clf1_pipe, clf2, clf3], voting='soft') eclf.fit(X, y).predict(X[[1, 51, 149]]) array([0, 1, 2]) Furthermore, we can fit the SequentialFeatureSelector separately, outside the grid search hyperparameter optimization pipeline. Here, we determine the best features first, and then we construct a pipeline using these \"fixed,\" best features as input for the ColumnSelector : sfs1 = SequentialFeatureSelector(clf1, k_features=2, forward=True, floating=False, scoring='accuracy', verbose=1, cv=0) sfs1.fit(X, y) print('Best features', sfs1.k_feature_idx_) col_sel = ColumnSelector(cols=sfs1.k_feature_idx_) clf1_pipe = Pipeline([('sel', col_sel), ('logreg', clf1)]) [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished Features: 1/2[Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished Features: 2/2 Best features (0, 2) eclf = EnsembleVoteClassifier(clfs=[clf1_pipe, clf2, clf3], voting='soft') eclf.fit(X, y).predict(X[[1, 51, 149]]) array([0, 1, 2])","title":"Manual Approach"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#example-5-using-pre-fitted-classifiers","text":"from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target Assume that we previously fitted our classifiers: from sklearn import model_selection from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier import numpy as np clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() for clf in (clf1, clf2, clf3): clf.fit(X, y) By setting refit=False , the EnsembleVoteClassifier will not re-fit these classifiers to save computational time: from mlxtend.classifier import EnsembleVoteClassifier import copy eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], weights=[1,1,1], refit=False) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'Ensemble'] eclf.fit(X, y) print('accuracy:', np.mean(y == eclf.predict(X))) accuracy: 0.973333333333 However, please note that refit=False is incompatible with any form of cross-validation that is performed in, e.g., model_selection.cross_val_score or model_selection.GridSearchCV , etc., since it would require the classifiers to be refit to the training folds. Thus, only use refit=False if you want to make a prediction directly without cross-validation.","title":"Example 5 - Using Pre-fitted Classifiers"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#example-6-ensembles-of-classifiers-that-operate-on-different-feature-subsets","text":"If desired, the different classifiers can be fit to different subsets of features in the training dataset.
The following example illustrates how this can be done on a technical level using scikit-learn pipelines and the ColumnSelector : from sklearn.datasets import load_iris from mlxtend.classifier import EnsembleVoteClassifier from mlxtend.feature_selection import ColumnSelector from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression iris = load_iris() X = iris.data y = iris.target pipe1 = make_pipeline(ColumnSelector(cols=(0, 2)), LogisticRegression()) pipe2 = make_pipeline(ColumnSelector(cols=(1, 2, 3)), LogisticRegression()) eclf = EnsembleVoteClassifier(clfs=[pipe1, pipe2]) eclf.fit(X, y) EnsembleVoteClassifier(clfs=[Pipeline(memory=None, steps=[('columnselector', ColumnSelector(cols=(0, 2), drop_axis=False)), ('logisticregression', LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l...='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False))])], refit=True, verbose=0, voting='hard', weights=None)","title":"Example 6 - Ensembles of Classifiers that Operate on Different Feature Subsets"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#example-7-a-note-about-scikit-learn-svms-and-soft-voting","text":"This section provides some additional technical insights into how probabilities are used when voting='soft' . Note that scikit-learn estimates the probabilities for SVMs (more info here: http://scikit-learn.org/stable/modules/svm.html#scores-probabilities) in a way that may not be consistent with the class labels that the SVM predicts. This is an extreme example, but let's say we have a dataset with 3 class labels, 0, 1, and 2. For a given training example, the SVM classifier may predict class 2. However, the class-membership probabilities may look as follows: class 0: 99% class 1: 0.5% class 2: 0.5% A practical example of this scenario is shown below: import numpy as np from mlxtend.classifier import EnsembleVoteClassifier from sklearn.svm import SVC from sklearn.datasets import load_iris iris = load_iris() X, y = iris.data, iris.target clf2 = SVC(probability=True, random_state=4) clf2.fit(X, y) eclf = EnsembleVoteClassifier(clfs=[clf2], voting='soft', refit=False) eclf.fit(X, y) for svm_class, e_class, svm_prob, e_prob in zip(clf2.predict(X), eclf.predict(X), clf2.predict_proba(X), eclf.predict_proba(X)): if svm_class != e_class: print('============') print('Probas from SVM :', svm_prob) print('Class from SVM :', svm_class) print('Probas from SVM in Ensemble:', e_prob) print('Class from SVM in Ensemble :', e_class) print('============') ============ Probas from SVM : [ 0.01192489 0.47662663 0.51144848] Class from SVM : 1 Probas from SVM in Ensemble: [ 0.01192489 0.47662663 0.51144848] Class from SVM in Ensemble : 2 ============ Based on the probabilities, we would expect the SVM to predict class 2, because it has the highest probability. Since the EnsembleVoteClassifier uses the argmax function internally if voting='soft' , it would indeed predict class 2 in this case even if the ensemble consists of only one SVM model.
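Conceptually, the soft-voting prediction for this one-model ensemble boils down to the following minimal sketch (an illustration of the idea, not mlxtend's actual implementation; it reuses clf2 and X from the example above):

```python
import numpy as np

# Average the predict_proba outputs of all ensemble members (here, only clf2)
# and take the argmax over classes -- this is what soft voting reduces to.
probas = np.stack([clf.predict_proba(X) for clf in [clf2]])
avg = probas.mean(axis=0)                 # shape: [n_samples, n_classes]
soft_vote_pred = np.argmax(avg, axis=1)   # -> class 2 for the sample shown above

# The SVM itself predicts via its decision function, which is why
# clf2.predict(X) can disagree with np.argmax(clf2.predict_proba(X), axis=1).
print((soft_vote_pred != clf2.predict(X)).sum())  # number of disagreements
```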
Note that in practice, this minor technical detail does not need to concern you, but it is useful to keep it in mind in case you are wondering about results from a 1-model SVM ensemble compared to that SVM alone -- this is not a bug.","title":"Example 7 - A Note about Scikit-Learn SVMs and Soft Voting"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#api","text":"EnsembleVoteClassifier(clfs, voting='hard', weights=None, verbose=0, refit=True) Soft Voting/Majority Rule classifier for scikit-learn estimators. Parameters clfs : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the VotingClassifier will fit clones of those original classifiers that will be stored in the class attribute self.clfs_ if refit=True (default). voting : str, {'hard', 'soft'} (default='hard') If 'hard', uses predicted class labels for majority rule voting. Else if 'soft', predicts the class label based on the argmax of the sums of the predicted probabilities, which is recommended for an ensemble of well-calibrated classifiers. weights : array-like, shape = [n_classifiers], optional (default= None ) Sequence of weights ( float or int ) to weight the occurrences of predicted class labels ( hard voting) or class probabilities before averaging ( soft voting). Uses uniform weights if None . verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the clf being fitted - verbose=2 : Prints info about the parameters of the clf being fitted - verbose>2 : Changes verbose param of the underlying clf to self.verbose - 2 refit : bool (default: True) Refits classifiers in clfs if True; uses references to the clfs , otherwise (assumes that the classifiers were already fit). Note: refit=False is incompatible with most scikit-learn wrappers! For instance, if any form of cross-validation is performed, this would require re-fitting the classifiers to the training folds, which would raise a NotFittedError if refit=False. (New in mlxtend v0.6.) Attributes classes_ : array-like, shape = [n_predictions] clfs : array-like, shape = [n_predictions] The unmodified input classifiers clfs_ : array-like, shape = [n_predictions] Fitted clones of the input classifiers Examples >>> import numpy as np >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.ensemble import RandomForestClassifier >>> from mlxtend.classifier import EnsembleVoteClassifier >>> clf1 = LogisticRegression(random_state=1) >>> clf2 = RandomForestClassifier(random_state=1) >>> clf3 = GaussianNB() >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> eclf1 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='hard', verbose=1) >>> eclf1 = eclf1.fit(X, y) >>> print(eclf1.predict(X)) [1 1 1 2 2 2] >>> eclf2 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft') >>> eclf2 = eclf2.fit(X, y) >>> print(eclf2.predict(X)) [1 1 1 2 2 2] >>> eclf3 = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], ... voting='soft', weights=[2,1,1]) >>> eclf3 = eclf3.fit(X, y) >>> print(eclf3.predict(X)) [1 1 1 2 2 2] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/EnsembleVoteClassifier/","title":"API"},{"location":"user_guide/classifier/EnsembleVoteClassifier/#methods","text":"fit(X, y, sample_weight=None) Learn weight coefficients from training data for each classifier.
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weight to each classifier in the clfs list. Raises an error if a classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict class labels for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns maj : array-like, shape = [n_samples] Predicted class labels. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns avg : array-like, shape = [n_samples, n_classes] Weighted average probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Return class labels or probabilities for X for each estimator. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns If voting='soft' : array-like = [n_classifiers, n_samples, n_classes] Class probabilities calculated by each classifier. If voting='hard' : array-like = [n_classifiers, n_samples] Class labels predicted by each classifier.","title":"Methods"},{"location":"user_guide/classifier/LogisticRegression/","text":"Logistic Regression A logistic regression class for binary classification tasks. from mlxtend.classifier import LogisticRegression Overview Related to the Perceptron and 'Adaline' , a Logistic Regression model is a linear model for binary classification. However, instead of using the linear activation function and minimizing the sum of squared errors (SSE) as in Adaline, we pass the net input through a sigmoid function, i.e., the logistic function: \phi(z) = \frac{1}{1 + e^{-z}}, where z is defined as the net input z = w_0x_0 + w_1x_1 + ...
+ w_mx_m = \\sum_{j=0}^{m} w_j x_j= \\mathbf{w}^T\\mathbf{x}. The net input is in turn based on the logit function logit(p(y=1 \\mid \\mathbf{x})) = z. Here, p(y=1 \\mid \\mathbf{x}) is the conditional probability that a particular sample belongs to class 1 given its features \\mathbf{x} . The logit function takes inputs in the range [0, 1] and transform them to values over the entire real number range. In contrast, the logistic function takes input values over the entire real number range and transforms them to values in the range [0, 1]. In other words, the logistic function is the inverse of the logit function, and it lets us predict the conditional probability that a certain sample belongs to class 1 (or class 0). After model fitting, the conditional probability p(y=1 \\mid \\mathbf{x}) is converted to a binary class label via a threshold function g(\\cdot) : $$y = g({z}) = \\begin{cases} 1 & \\text{if $\\phi(z) \\ge 0.5$}\\\\ 0 & \\text{otherwise.} \\end{cases} $$ or equivalently: $$y = g({z}) = \\begin{cases} 1 & \\text{if z $\\ge$ 0}\\\\ 0 & \\text{otherwise}. \\end{cases} $$ Objective Function -- Log-Likelihood In order to parameterize a logistic regression model, we maximize the likelihood L(\\cdot) (or minimize the logistic cost function). We write the likelihood as L(\\mathbf{w}) = P(\\mathbf{y} \\mid \\mathbf{x};\\mathbf{w}) = \\prod_{i=1}^{n} P\\big(y^{(i)} \\mid x^{(i)}; \\mathbf{w}\\big) = \\prod^{n}_{i=1}\\bigg(\\phi\\big(z^{(i)}\\big)\\bigg)^{y^{(i)}} \\bigg(1-\\phi\\big(z^{(i)}\\big)\\bigg)^{1-y^{(i)}}, under the assumption that the training samples are independent of each other. In practice, it is easier to maximize the (natural) log of this equation, which is called the log-likelihood function: l(\\mathbf{w}) = \\log L(\\mathbf{w}) = \\sum^{n}_{i=1} y^{(i)} \\log \\bigg(\\phi\\big(z^{(i)}\\big)\\bigg) + \\big( 1 - y^{(i)}\\big) \\log \\big(1-\\phi\\big(z^{(i)}\\big)\\big) One advantage of taking the log is to avoid numeric underflow (and challenges with floating point math) for very small likelihoods. Another advantage is that we can obtain the derivative more easily, using the addition trick to rewrite the product of factors as a summation term, which we can then maximize using optimization algorithms such as gradient ascent. Objective Function -- Logistic Cost Function An alternative to maximizing the log-likelihood, we can define a cost function J(\\cdot) to be minimized; we rewrite the log-likelihood as: J(\\mathbf{w}) = \\sum_{i=1}^{m} - y^{(i)} log \\bigg( \\phi\\big(z^{(i)}\\big) \\bigg) - \\big(1 - y^{(i)}\\big) log\\bigg(1-\\phi\\big(z^{(i)}\\big)\\bigg) $$ J\\big(\\phi(z), y; \\mathbf{w}\\big) = \\begin{cases} -log\\big(\\phi(z) \\big) & \\text{if $y = 1$}\\\\ -log\\big(1- \\phi(z) \\big) & \\text{if $y = 0$} \\end{cases} $$ As we can see in the figure above, we penalize wrong predictions with an increasingly larger cost. Gradient Descent (GD) and Stochastic Gradient Descent (SGD) Optimization Gradient Ascent and the log-likelihood To learn the weight coefficient of a logistic regression model via gradient-based optimization, we compute the partial derivative of the log-likelihood function -- w.r.t. 
the j th weight -- as follows: \frac{\partial}{\partial w_j} l(\mathbf{w}) = \bigg(y \frac{1}{\phi(z)} - (1-y) \frac{1}{1-\phi{(z)}} \bigg) \frac{\partial}{\partial w_j}\phi(z) As an intermediate step, we compute the partial derivative of the sigmoid function, which will come in handy later: \begin{align} &\frac{\partial}{\partial z} \phi(z) = \frac{\partial}{{\partial z}} \frac{1}{1+e^{-z}} \\\\ &= \frac{1}{(1 + e^{-z})^{2}} e^{-z}\\\\ &= \frac{1}{1+e^{-z}} \bigg(1 - \frac{1}{1+e^{-z}} \bigg)\\\\ &= \phi(z)\big(1-\phi(z)\big) \end{align} Now, we re-substitute \frac{\partial}{\partial z} \phi(z) = \phi(z) \big(1 - \phi(z)\big) back into the log-likelihood partial derivative equation and obtain the equation shown below: \begin{align} & \bigg(y \frac{1}{\phi{(z)}} - (1 - y) \frac{1}{1 - \phi(z)} \bigg) \frac{\partial}{\partial w_j} \phi(z) \\\\ &= \bigg(y \frac{1}{\phi{(z)}} - (1 - y) \frac{1}{1 - \phi(z)} \bigg) \phi(z) \big(1 - \phi(z)\big) \frac{\partial}{\partial w_j}z\\\\ &= \big(y\big(1-\phi(z)\big) - (1 - y) \phi(z)\big)x_j\\\\ &=\big(y - \phi(z)\big)x_j \end{align} Now, in order to find the weights of the model, we take a step proportional to the positive direction of the gradient to maximize the log-likelihood. Furthermore, we add a coefficient, the learning rate \eta , to the weight update: \begin{align} & w_j := w_j + \eta \frac{\partial}{\partial w_j} l(\mathbf{w})\\\\ & w_j := w_j + \eta \sum^{n}_{i=1} \big( y^{(i)} - \phi\big(z^{(i)}\big)\big)x_j^{(i)} \end{align} Note that the gradient (and weight update) is computed from all samples in the training set in gradient ascent/descent in contrast to stochastic gradient ascent/descent. For more information about the differences between gradient descent and stochastic gradient descent, please see the related article Gradient Descent and Stochastic Gradient Descent . The previous equation shows the weight update for a single weight j . In gradient-based optimization, all weight coefficients are updated simultaneously; the weight update can be written more compactly as \mathbf{w} := \mathbf{w} + \Delta\mathbf{w}, where \Delta{\mathbf{w}} = \eta \nabla l(\mathbf{w}) Gradient Descent and the logistic cost function In the previous section, we derived the gradient of the log-likelihood function, which can be optimized via gradient ascent. Similarly, we can obtain the cost gradient of the logistic cost function J(\cdot) and minimize it via gradient descent in order to learn the logistic regression model. The update rule for a single weight: \begin{align} & \Delta{w_j} = -\eta \frac{\partial J}{\partial w_j} \\ & = \eta \sum_{i=1}^{n}\big(y^{(i)} - \phi\big(z^{(i)}\big)\big) x_j^{(i)} \end{align} The simultaneous weight update: \mathbf{w} := \mathbf{w} + \Delta\mathbf{w} where \Delta{\mathbf{w}} = - \eta \nabla J(\mathbf{w}). Shuffling Random shuffling is implemented as: for one or more epochs: randomly shuffle the samples in the training set; for each training sample i , compute the gradients and perform the weight updates. Regularization As a way to tackle overfitting, we can add additional bias to the logistic regression model via a regularization term.
Via the L2 regularization term, we reduce the complexity of the model by penalizing large weight coefficients: L2: \frac{\lambda}{2}\lVert \mathbf{w} \lVert_2 = \frac{\lambda}{2} \sum_{j=1}^{m} w_j^2 In order to apply regularization, we just need to add the regularization term to the cost function that we defined for logistic regression to shrink the weights: J(\mathbf{w}) = \sum_{i=1}^{m} \Bigg[ - y^{(i)} log \bigg( \phi\big(z^{(i)}\big) \bigg) - \big(1 - y^{(i)}\big) log\bigg(1-\phi\big(z^{(i)}\big)\bigg) \Bigg] + \frac{\lambda}{2} \sum_{j=1}^{m} w_j^2 The update rule for a single weight: \begin{align} & \Delta{w_j} = -\eta \bigg( \frac{\partial J}{\partial w_j} + \lambda w_j\bigg)\\ & = \eta \sum_{i=1}^{n}\big(y^{(i)} - \phi\big(z^{(i)}\big)\big) x_j^{(i)} - \eta \lambda w_j \end{align} The simultaneous weight update: \mathbf{w} := \mathbf{w} + \Delta\mathbf{w} where \Delta{\mathbf{w}} = - \eta \big( \nabla J(\mathbf{w}) + \lambda \mathbf{w}\big). For more information on regularization, please see Regularization of Generalized Linear Models . References Bishop, Christopher M. Pattern recognition and machine learning . Springer, 2006. pp. 203-213 Example 1 - Gradient Descent from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import LogisticRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = LogisticRegression(eta=0.1, l2_lambda=0.0, epochs=100, minibatches=1, # for Gradient Descent random_seed=1, print_progress=3) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Logistic Regression - Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 100/100 | Cost 0.32 | Elapsed: 0:00:00 | ETA: 0:00:00 Predicting Class Labels y_pred = lr.predict(X) print('Last 3 Class Labels: %s' % y_pred[-3:]) Last 3 Class Labels: [1 1 1] Predicting Class Probabilities y_pred = lr.predict_proba(X) print('Last 3 Class Probabilities: %s' % y_pred[-3:]) Last 3 Class Probabilities: [ 0.99997968 0.99339873 0.99992707] Example 2 - Stochastic Gradient Descent from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import LogisticRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = LogisticRegression(eta=0.5, epochs=30, l2_lambda=0.0, minibatches=len(y), # for SGD learning random_seed=1, print_progress=3) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Logistic Regression - Stochastic Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 30/30 | Cost 0.27 | Elapsed: 0:00:00 | ETA: 0:00:00 Example 3 - Stochastic Gradient Descent w. Minibatches Here, we set minibatches to 5, which will result in Minibatch Learning with a batch size of 20 samples (since 100 Iris samples divided by 5 minibatches equals 20).
from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import LogisticRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = LogisticRegression(eta=0.5, epochs=30, l2_lambda=0.0, minibatches=5, # 100/5 = 20 samples per minibatch random_seed=1, print_progress=3) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Logistic Regression - Stochastic Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 30/30 | Cost 0.25 | Elapsed: 0:00:00 | ETA: 0:00:00 API LogisticRegression(eta=0.01, epochs=50, l2_lambda=0.0, minibatches=1, random_seed=None, print_progress=0) Logistic regression classifier. Note that this implementation of Logistic Regression expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2_lambda : float Regularization parameter for L2 regularization. No regularization if l2_lambda=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats with cross_entropy cost (sgd or gd) for every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/LogisticRegression/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class 1 probability : float score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features.
y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Logistic Regression"},{"location":"user_guide/classifier/LogisticRegression/#logistic-regression","text":"A logistic regression class for binary classification tasks. from mlxtend.classifier import LogisticRegression","title":"Logistic Regression"},{"location":"user_guide/classifier/LogisticRegression/#overview","text":"Related to the Perceptron and 'Adaline' , a Logistic Regression model is a linear model for binary classification. However, instead of using the linear activation function and minimizing the sum of squared errors (SSE) as in Adaline, we pass the net input through a sigmoid function, i.e., the logistic function: \phi(z) = \frac{1}{1 + e^{-z}}, where z is defined as the net input z = w_0x_0 + w_1x_1 + ... + w_mx_m = \sum_{j=0}^{m} w_j x_j= \mathbf{w}^T\mathbf{x}. The net input is in turn based on the logit function logit(p(y=1 \mid \mathbf{x})) = z. Here, p(y=1 \mid \mathbf{x}) is the conditional probability that a particular sample belongs to class 1 given its features \mathbf{x} . The logit function takes inputs in the range [0, 1] and transforms them to values over the entire real number range. In contrast, the logistic function takes input values over the entire real number range and transforms them to values in the range [0, 1]. In other words, the logistic function is the inverse of the logit function, and it lets us predict the conditional probability that a certain sample belongs to class 1 (or class 0). After model fitting, the conditional probability p(y=1 \mid \mathbf{x}) is converted to a binary class label via a threshold function g(\cdot) : $$y = g({z}) = \begin{cases} 1 & \text{if $\phi(z) \ge 0.5$}\\ 0 & \text{otherwise.} \end{cases} $$ or equivalently: $$y = g({z}) = \begin{cases} 1 & \text{if z $\ge$ 0}\\ 0 & \text{otherwise}. \end{cases} $$","title":"Overview"},{"location":"user_guide/classifier/LogisticRegression/#objective-function-log-likelihood","text":"In order to parameterize a logistic regression model, we maximize the likelihood L(\cdot) (or minimize the logistic cost function). We write the likelihood as L(\mathbf{w}) = P(\mathbf{y} \mid \mathbf{x};\mathbf{w}) = \prod_{i=1}^{n} P\big(y^{(i)} \mid x^{(i)}; \mathbf{w}\big) = \prod^{n}_{i=1}\bigg(\phi\big(z^{(i)}\big)\bigg)^{y^{(i)}} \bigg(1-\phi\big(z^{(i)}\big)\bigg)^{1-y^{(i)}}, under the assumption that the training samples are independent of each other. In practice, it is easier to maximize the (natural) log of this equation, which is called the log-likelihood function: l(\mathbf{w}) = \log L(\mathbf{w}) = \sum^{n}_{i=1} y^{(i)} \log \bigg(\phi\big(z^{(i)}\big)\bigg) + \big( 1 - y^{(i)}\big) \log \big(1-\phi\big(z^{(i)}\big)\big) One advantage of taking the log is to avoid numeric underflow (and challenges with floating point math) for very small likelihoods, as the short numerical sketch below illustrates.
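As a quick illustration of the underflow point (an addition for this guide, not part of the original docs), consider multiplying many small per-sample likelihoods directly versus summing their logs:

```python
import numpy as np

# 1000 per-sample likelihoods, each fairly small
likelihoods = np.full(1000, 1e-4)

# Multiplying directly underflows to 0.0 in float64 ...
print(np.prod(likelihoods))          # 0.0

# ... whereas summing the logs stays perfectly representable.
print(np.sum(np.log(likelihoods)))   # approx. -9210.34
```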
Another advantage is that we can obtain the derivative more easily, using the addition trick to rewrite the product of factors as a summation term, which we can then maximize using optimization algorithms such as gradient ascent.","title":"Objective Function -- Log-Likelihood"},{"location":"user_guide/classifier/LogisticRegression/#objective-function-logistic-cost-function","text":"As an alternative to maximizing the log-likelihood, we can define a cost function J(\cdot) to be minimized; we rewrite the log-likelihood as: J(\mathbf{w}) = \sum_{i=1}^{m} - y^{(i)} log \bigg( \phi\big(z^{(i)}\big) \bigg) - \big(1 - y^{(i)}\big) log\bigg(1-\phi\big(z^{(i)}\big)\bigg) $$ J\big(\phi(z), y; \mathbf{w}\big) = \begin{cases} -log\big(\phi(z) \big) & \text{if $y = 1$}\\ -log\big(1- \phi(z) \big) & \text{if $y = 0$} \end{cases} $$ As we can see in the figure above, we penalize wrong predictions with an increasingly larger cost.","title":"Objective Function -- Logistic Cost Function"},{"location":"user_guide/classifier/LogisticRegression/#gradient-descent-gd-and-stochastic-gradient-descent-sgd-optimization","text":"","title":"Gradient Descent (GD) and Stochastic Gradient Descent (SGD) Optimization"},{"location":"user_guide/classifier/LogisticRegression/#gradient-ascent-and-the-log-likelihood","text":"To learn the weight coefficient of a logistic regression model via gradient-based optimization, we compute the partial derivative of the log-likelihood function -- w.r.t. the j th weight -- as follows: \frac{\partial}{\partial w_j} l(\mathbf{w}) = \bigg(y \frac{1}{\phi(z)} - (1-y) \frac{1}{1-\phi{(z)}} \bigg) \frac{\partial}{\partial w_j}\phi(z) As an intermediate step, we compute the partial derivative of the sigmoid function, which will come in handy later: \begin{align} &\frac{\partial}{\partial z} \phi(z) = \frac{\partial}{{\partial z}} \frac{1}{1+e^{-z}} \\\\ &= \frac{1}{(1 + e^{-z})^{2}} e^{-z}\\\\ &= \frac{1}{1+e^{-z}} \bigg(1 - \frac{1}{1+e^{-z}} \bigg)\\\\ &= \phi(z)\big(1-\phi(z)\big) \end{align} Now, we re-substitute \frac{\partial}{\partial z} \phi(z) = \phi(z) \big(1 - \phi(z)\big) back into the log-likelihood partial derivative equation and obtain the equation shown below: \begin{align} & \bigg(y \frac{1}{\phi{(z)}} - (1 - y) \frac{1}{1 - \phi(z)} \bigg) \frac{\partial}{\partial w_j} \phi(z) \\\\ &= \bigg(y \frac{1}{\phi{(z)}} - (1 - y) \frac{1}{1 - \phi(z)} \bigg) \phi(z) \big(1 - \phi(z)\big) \frac{\partial}{\partial w_j}z\\\\ &= \big(y\big(1-\phi(z)\big) - (1 - y) \phi(z)\big)x_j\\\\ &=\big(y - \phi(z)\big)x_j \end{align} Now, in order to find the weights of the model, we take a step proportional to the positive direction of the gradient to maximize the log-likelihood. Furthermore, we add a coefficient, the learning rate \eta , to the weight update: \begin{align} & w_j := w_j + \eta \frac{\partial}{\partial w_j} l(\mathbf{w})\\\\ & w_j := w_j + \eta \sum^{n}_{i=1} \big( y^{(i)} - \phi\big(z^{(i)}\big)\big)x_j^{(i)} \end{align} Note that the gradient (and weight update) is computed from all samples in the training set in gradient ascent/descent in contrast to stochastic gradient ascent/descent. For more information about the differences between gradient descent and stochastic gradient descent, please see the related article Gradient Descent and Stochastic Gradient Descent . The previous equation shows the weight update for a single weight j (a minimal code sketch of this update follows below).
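To make the update rule concrete, here is a small NumPy sketch of one gradient-ascent step on the log-likelihood (an illustrative toy implementation written for this guide, not mlxtend's internal code; the helper names are made up for this sketch):

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def gradient_ascent_step(w, X, y, eta=0.01):
    # net input z^(i) = w^T x^(i), computed for all samples at once
    z = X.dot(w)
    # gradient of the log-likelihood: sum_i (y^(i) - phi(z^(i))) * x^(i)
    grad = X.T.dot(y - sigmoid(z))
    # simultaneous update of all weight coefficients: w := w + eta * grad
    return w + eta * grad

# toy usage: 4 samples, 2 features (bias term omitted for brevity)
X = np.array([[1.0, 2.0], [2.0, 1.0], [-1.0, -2.0], [-2.0, -1.0]])
y = np.array([1, 1, 0, 0])
w = np.zeros(2)
for _ in range(100):
    w = gradient_ascent_step(w, X, y, eta=0.1)
print(w, sigmoid(X.dot(w)).round(3))
```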
In gradient-based optimization, all weight coefficients are updated simultaneously; the weight update can be written more compactly as \mathbf{w} := \mathbf{w} + \Delta\mathbf{w}, where \Delta{\mathbf{w}} = \eta \nabla l(\mathbf{w})","title":"Gradient Ascent and the log-likelihood"},{"location":"user_guide/classifier/LogisticRegression/#gradient-descent-and-the-logistic-cost-function","text":"In the previous section, we derived the gradient of the log-likelihood function, which can be optimized via gradient ascent. Similarly, we can obtain the cost gradient of the logistic cost function J(\cdot) and minimize it via gradient descent in order to learn the logistic regression model. The update rule for a single weight: \begin{align} & \Delta{w_j} = -\eta \frac{\partial J}{\partial w_j} \\ & = \eta \sum_{i=1}^{n}\big(y^{(i)} - \phi\big(z^{(i)}\big)\big) x_j^{(i)} \end{align} The simultaneous weight update: \mathbf{w} := \mathbf{w} + \Delta\mathbf{w} where \Delta{\mathbf{w}} = - \eta \nabla J(\mathbf{w}).","title":"Gradient Descent and the logistic cost function"},{"location":"user_guide/classifier/LogisticRegression/#shuffling","text":"Random shuffling is implemented as: for one or more epochs: randomly shuffle the samples in the training set; for each training sample i , compute the gradients and perform the weight updates.","title":"Shuffling"},{"location":"user_guide/classifier/LogisticRegression/#regularization","text":"As a way to tackle overfitting, we can add additional bias to the logistic regression model via a regularization term. Via the L2 regularization term, we reduce the complexity of the model by penalizing large weight coefficients: L2: \frac{\lambda}{2}\lVert \mathbf{w} \lVert_2 = \frac{\lambda}{2} \sum_{j=1}^{m} w_j^2 In order to apply regularization, we just need to add the regularization term to the cost function that we defined for logistic regression to shrink the weights: J(\mathbf{w}) = \sum_{i=1}^{m} \Bigg[ - y^{(i)} log \bigg( \phi\big(z^{(i)}\big) \bigg) - \big(1 - y^{(i)}\big) log\bigg(1-\phi\big(z^{(i)}\big)\bigg) \Bigg] + \frac{\lambda}{2} \sum_{j=1}^{m} w_j^2 The update rule for a single weight: \begin{align} & \Delta{w_j} = -\eta \bigg( \frac{\partial J}{\partial w_j} + \lambda w_j\bigg)\\ & = \eta \sum_{i=1}^{n}\big(y^{(i)} - \phi\big(z^{(i)}\big)\big) x_j^{(i)} - \eta \lambda w_j \end{align} The simultaneous weight update: \mathbf{w} := \mathbf{w} + \Delta\mathbf{w} where \Delta{\mathbf{w}} = - \eta \big( \nabla J(\mathbf{w}) + \lambda \mathbf{w}\big). For more information on regularization, please see Regularization of Generalized Linear Models .","title":"Regularization"},{"location":"user_guide/classifier/LogisticRegression/#references","text":"Bishop, Christopher M. Pattern recognition and machine learning . Springer, 2006. pp.
203-213","title":"References"},{"location":"user_guide/classifier/LogisticRegression/#example-1-gradient-descent","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import LogisticRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = LogisticRegression(eta=0.1, l2_lambda=0.0, epochs=100, minibatches=1, # for Gradient Descent random_seed=1, print_progress=3) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Logistic Regression - Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 100/100 | Cost 0.32 | Elapsed: 0:00:00 | ETA: 0:00:00","title":"Example 1 - Gradient Descent"},{"location":"user_guide/classifier/LogisticRegression/#predicting-class-labels","text":"y_pred = lr.predict(X) print('Last 3 Class Labels: %s' % y_pred[-3:]) Last 3 Class Labels: [1 1 1]","title":"Predicting Class Labels"},{"location":"user_guide/classifier/LogisticRegression/#predicting-class-probabilities","text":"y_pred = lr.predict_proba(X) print('Last 3 Class Probabilities: %s' % y_pred[-3:]) Last 3 Class Probabilities: [ 0.99997968 0.99339873 0.99992707]","title":"Predicting Class Probabilities"},{"location":"user_guide/classifier/LogisticRegression/#example-2-stochastic-gradient-descent","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import LogisticRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = LogisticRegression(eta=0.5, epochs=30, l2_lambda=0.0, minibatches=len(y), # for SGD learning random_seed=1, print_progress=3) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Logistic Regression - Stochastic Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 30/30 | Cost 0.27 | Elapsed: 0:00:00 | ETA: 0:00:00","title":"Example 2 - Stochastic Gradient Descent"},{"location":"user_guide/classifier/LogisticRegression/#example-3-stochastic-gradient-descent-w-minibatches","text":"Here, we set minibatches to 5, which will result in Minibatch Learning with a batch size of 20 samples (since 100 Iris samples divided by 5 minibatches equals 20).
from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import LogisticRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = LogisticRegression(eta=0.5, epochs=30, l2_lambda=0.0, minibatches=5, # 100/5 = 20 samples per minibatch random_seed=1, print_progress=3) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Logistic Regression - Stochastic Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 30/30 | Cost 0.25 | Elapsed: 0:00:00 | ETA: 0:00:00","title":"Example 3 - Stochastic Gradient Descent w. Minibatches"},{"location":"user_guide/classifier/LogisticRegression/#api","text":"LogisticRegression(eta=0.01, epochs=50, l2_lambda=0.0, minibatches=1, random_seed=None, print_progress=0) Logistic regression classifier. Note that this implementation of Logistic Regression expects binary class labels in {0, 1}. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2_lambda : float Regularization parameter for L2 regularization. No regularization if l2_lambda=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats with cross_entropy cost (sgd or gd) for every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/LogisticRegression/","title":"API"},{"location":"user_guide/classifier/LogisticRegression/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features.
Returns Class 1 probability : float score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Methods"},{"location":"user_guide/classifier/MultiLayerPerceptron/","text":"Neural Network - Multilayer Perceptron Implementation of a multilayer perceptron, a feedforward artificial neural network. from mlxtend.classifier import MultiLayerPerceptron Overview Although the code is fully working and can be used for common classification tasks, this implementation is not geared towards efficiency but clarity \u2013 the original code was written for demonstration purposes. Basic Architecture The neurons x_0 and a_0 represent the bias units ( x_0=1 , a_0=1 ). The i th superscript denotes the i th layer, and the j th subscript stands for the index of the respective unit. For example, a_{1}^{(2)} refers to the first activation unit after the bias unit (i.e., 2nd activation unit) in the 2nd layer (here: the hidden layer) \\begin{align} \\mathbf{a^{(2)}} &= \\begin{bmatrix} a_{0}^{(2)} \\\\ a_{1}^{(2)} \\\\ \\vdots \\\\ a_{m}^{(2)} \\end{bmatrix}. \\end{align} Each layer (l) in a multi-layer perceptron, a directed graph, is fully connected to the next layer (l+1) . We write the weight coefficient that connects the k th unit in the l th layer to the j th unit in layer l+1 as w^{(l)}_{j, k} . For example, the weight coefficient that connects the units a_0^{(2)} \\rightarrow a_1^{(3)} would be written as w_{1,0}^{(2)} . Activation In the current implementation, the activations of the hidden layer(s) are computed via the logistic (sigmoid) function \\phi(z) = \\frac{1}{1 + e^{-z}}. (For more details on the logistic function, please see classifier.LogisticRegression ; a general overview of different activation functions can be found here .) Furthermore, the MLP uses the softmax function in the output layer. For more details on the softmax function, please see classifier.SoftmaxRegression . References D. E. Rumelhart, G. E. Hinton, and R. J. Williams. Learning representations by back-propagating errors . Nature, 323:533\u2013536, 1986. C. M. Bishop. Neural networks for pattern recognition . Oxford University Press, 1995. T. Hastie, J. Friedman, and R. Tibshirani. The Elements of Statistical Learning , Volume 2. Springer, 2009. Example 1 - Classifying Iris Flowers Load 2 features from Iris (sepal length and petal width) for visualization purposes: from mlxtend.data import iris_data X, y = iris_data() X = X[:, [0, 3]] # standardize training data X_std = (X - X.mean(axis=0)) / X.std(axis=0) Train a neural network for the 3 output flower classes ('Setosa', 'Versicolor', 'Virginica') with regular gradient descent ( minibatches=1 ), 50 hidden units, and no regularization. Gradient Descent Setting the minibatches to 1 will result in gradient descent training; please see Gradient Descent vs. Stochastic Gradient Descent for details.
from mlxtend.classifier import MultiLayerPerceptron as MLP nn1 = MLP(hidden_layers=[50], l2=0.00, l1=0.0, epochs=150, eta=0.05, momentum=0.1, decrease_const=0.0, minibatches=1, random_seed=1, print_progress=3) nn1 = nn1.fit(X_std, y) Iteration: 150/150 | Cost 0.06 | Elapsed: 0:00:00 | ETA: 0:00:00 from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt fig = plot_decision_regions(X=X_std, y=y, clf=nn1, legend=2) plt.title('Multi-layer Perceptron w. 1 hidden layer (logistic sigmoid)') plt.show() import matplotlib.pyplot as plt plt.plot(range(len(nn1.cost_)), nn1.cost_) plt.ylabel('Cost') plt.xlabel('Epochs') plt.show() print('Accuracy: %.2f%%' % (100 * nn1.score(X_std, y))) Accuracy: 96.67% Stochastic Gradient Descent Setting minibatches to n_samples will result in stochastic gradient descent training; please see Gradient Descent vs. Stochastic Gradient Descent for details. nn2 = MLP(hidden_layers=[50], l2=0.00, l1=0.0, epochs=5, eta=0.005, momentum=0.1, decrease_const=0.0, minibatches=len(y), random_seed=1, print_progress=3) nn2.fit(X_std, y) plt.plot(range(len(nn2.cost_)), nn2.cost_) plt.ylabel('Cost') plt.xlabel('Epochs') plt.show() Iteration: 5/5 | Cost 0.11 | Elapsed: 00:00:00 | ETA: 00:00:00 Continue the training for 25 epochs... nn2.epochs = 25 nn2 = nn2.fit(X_std, y) Iteration: 25/25 | Cost 0.07 | Elapsed: 0:00:00 | ETA: 0:00:00 plt.plot(range(len(nn2.cost_)), nn2.cost_) plt.ylabel('Cost') plt.xlabel('Epochs') plt.show() Example 2 - Classifying Handwritten Digits from a 10% MNIST Subset Load a 5000-sample subset of the MNIST dataset (please see data.loadlocal_mnist if you want to download and read in the complete MNIST dataset). from mlxtend.data import mnist_data from mlxtend.preprocessing import shuffle_arrays_unison X, y = mnist_data() X, y = shuffle_arrays_unison((X, y), random_seed=1) X_train, y_train = X[:500], y[:500] X_test, y_test = X[500:], y[500:] Visualize a sample from the MNIST dataset to check if it was loaded correctly: import matplotlib.pyplot as plt def plot_digit(X, y, idx): img = X[idx].reshape(28,28) plt.imshow(img, cmap='Greys', interpolation='nearest') plt.title('true label: %d' % y[idx]) plt.show() plot_digit(X, y, 3500) Standardize pixel values: import numpy as np from mlxtend.preprocessing import standardize X_train_std, params = standardize(X_train, columns=range(X_train.shape[1]), return_params=True) X_test_std = standardize(X_test, columns=range(X_test.shape[1]), params=params) Initialize the neural network to recognize the 10 different digits (0-9) using 100 epochs and mini-batch learning. nn1 = MLP(hidden_layers=[150], l2=0.00, l1=0.0, epochs=100, eta=0.005, momentum=0.0, decrease_const=0.0, minibatches=100, random_seed=1, print_progress=3) Learn the features while printing the progress to get an idea about how long it may take. import matplotlib.pyplot as plt nn1.fit(X_train_std, y_train) plt.plot(range(len(nn1.cost_)), nn1.cost_) plt.ylabel('Cost') plt.xlabel('Epochs') plt.show() Iteration: 100/100 | Cost 0.01 | Elapsed: 0:00:17 | ETA: 0:00:00 print('Train Accuracy: %.2f%%' % (100 * nn1.score(X_train_std, y_train))) print('Test Accuracy: %.2f%%' % (100 * nn1.score(X_test_std, y_test))) Train Accuracy: 100.00% Test Accuracy: 84.62% Please note that this neural network has been trained on only 10% of the MNIST data for technical demonstration purposes, hence the poor predictive performance.
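As a quick follow-up, the fitted model's predict method (documented in the API section below) can be used to spot-check individual test digits; this snippet assumes the nn1, X_test_std, and y_test objects from the cells above:

```python
# Quick sanity check on a few test digits; assumes `nn1`, `X_test_std`,
# and `y_test` from the preceding cells.
print('Predicted labels:', nn1.predict(X_test_std[:5]))
print('True labels:     ', y_test[:5])
```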
API MultiLayerPerceptron(eta=0.5, epochs=50, hidden_layers=[50], n_classes=None, momentum=0.0, l1=0.0, l2=0.0, dropout=1.0, decrease_const=0.0, minibatches=1, random_seed=None, print_progress=0) Multi-layer perceptron classifier with logistic sigmoid activations Parameters eta : float (default: 0.5) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. hidden_layers : list (default: [50]) Number of units per hidden layer. By default 50 units in the first hidden layer. At the moment only 1 hidden layer is supported n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. l1 : float (default: 0.0) L1 regularization strength l2 : float (default: 0.0) L2 regularization strength momentum : float (default: 0.0) Momentum constant. Factor multiplied with the gradient of the previous epoch t-1 to improve learning speed w(t) := w(t) - (grad(t) + momentum * grad(t-1)) decrease_const : float (default: 0.0) Decrease constant. Shrinks the learning rate after each epoch via eta / (1 + epoch*decrease_const) minibatches : int (default: 1) Divide the training data into k minibatches for accelerated stochastic gradient descent learning. Gradient Descent Learning if minibatches = 1 Stochastic Gradient Descent learning if minibatches = len(y) Minibatch learning if minibatches > 1 random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape=[n_features, n_classes] Weights after fitting. b_ : 1D-array, shape=[n_classes] Bias units after fitting. cost_ : list List of floats; the mean categorical cross entropy cost after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/MultiLayerPerceptron/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilties : array-like, shape= [n_samples, n_classes] score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. 
y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Neural Network - Multilayer Perceptron"},{"location":"user_guide/classifier/MultiLayerPerceptron/#neural-network-multilayer-perceptron","text":"Implementation of a multilayer perceptron, a feedforward artificial neural network. from mlxtend.classifier import MultiLayerPerceptron","title":"Neural Network - Multilayer Perceptron"},{"location":"user_guide/classifier/MultiLayerPerceptron/#overview","text":"Although the code is fully working and can be used for common classification tasks, this implementation is not geared towards efficiency but clarity \u2013 the original code was written for demonstration purposes.","title":"Overview"},{"location":"user_guide/classifier/MultiLayerPerceptron/#basic-architecture","text":"The neurons x_0 and a_0 represent the bias units ( x_0=1 , a_0=1 ). The i th superscript denotes the i th layer, and the j th subscript stands for the index of the respective unit. For example, a_{1}^{(2)} refers to the first activation unit after the bias unit (i.e., 2nd activation unit) in the 2nd layer (here: the hidden layer) \\begin{align} \\mathbf{a^{(2)}} &= \\begin{bmatrix} a_{0}^{(2)} \\\\ a_{1}^{(2)} \\\\ \\vdots \\\\ a_{m}^{(2)} \\end{bmatrix}. \\end{align} Each layer (l) in a multi-layer perceptron, a directed graph, is fully connected to the next layer (l+1) . We write the weight coefficient that connects the k th unit in the l th layer to the j th unit in layer l+1 as w^{(l)}_{j, k} . For example, the weight coefficient that connects the units a_0^{(2)} \\rightarrow a_1^{(3)} would be written as w_{1,0}^{(2)} .","title":"Basic Architecture"},{"location":"user_guide/classifier/MultiLayerPerceptron/#activation","text":"In the current implementation, the activations of the hidden layer(s) are computed via the logistic (sigmoid) function \\phi(z) = \\frac{1}{1 + e^{-z}}. (For more details on the logistic function, please see classifier.LogisticRegression ; a general overview of different activation functions can be found here .) Furthermore, the MLP uses the softmax function in the output layer. For more details on the softmax function, please see classifier.SoftmaxRegression .","title":"Activation"},{"location":"user_guide/classifier/MultiLayerPerceptron/#references","text":"D. E. Rumelhart, G. E. Hinton, and R. J. Williams. Learning representations by back-propagating errors . Nature, 323:533\u2013536, 1986. C. M. Bishop. Neural networks for pattern recognition . Oxford University Press, 1995. T. Hastie, J. Friedman, and R. Tibshirani. The Elements of Statistical Learning , Volume 2. Springer, 2009.","title":"References"},{"location":"user_guide/classifier/MultiLayerPerceptron/#example-1-classifying-iris-flowers","text":"Load 2 features from Iris (sepal length and petal width) for visualization purposes: from mlxtend.data import iris_data X, y = iris_data() X = X[:, [0, 3]] # standardize training data X_std = (X - X.mean(axis=0)) / X.std(axis=0) Train a neural network for the 3 output flower classes ('Setosa', 'Versicolor', 'Virginica') with regular gradient descent ( minibatches=1 ), 50 hidden units, and no regularization.","title":"Example 1 - Classifying Iris Flowers"},{"location":"user_guide/classifier/MultiLayerPerceptron/#gradient-descent","text":"Setting the minibatches to 1 will result in gradient descent training; please see Gradient Descent vs. Stochastic Gradient Descent for details.
from mlxtend.classifier import MultiLayerPerceptron as MLP nn1 = MLP(hidden_layers=[50], l2=0.00, l1=0.0, epochs=150, eta=0.05, momentum=0.1, decrease_const=0.0, minibatches=1, random_seed=1, print_progress=3) nn1 = nn1.fit(X_std, y) Iteration: 150/150 | Cost 0.06 | Elapsed: 0:00:00 | ETA: 0:00:00 from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt fig = plot_decision_regions(X=X_std, y=y, clf=nn1, legend=2) plt.title('Multi-layer Perceptron w. 1 hidden layer (logistic sigmoid)') plt.show() import matplotlib.pyplot as plt plt.plot(range(len(nn1.cost_)), nn1.cost_) plt.ylabel('Cost') plt.xlabel('Epochs') plt.show() print('Accuracy: %.2f%%' % (100 * nn1.score(X_std, y))) Accuracy: 96.67%","title":"Gradient Descent"},{"location":"user_guide/classifier/MultiLayerPerceptron/#stochastic-gradient-descent","text":"Setting minibatches to n_samples will result in stochastic gradient descent training; please see Gradient Descent vs. Stochastic Gradient Descent for details. nn2 = MLP(hidden_layers=[50], l2=0.00, l1=0.0, epochs=5, eta=0.005, momentum=0.1, decrease_const=0.0, minibatches=len(y), random_seed=1, print_progress=3) nn2.fit(X_std, y) plt.plot(range(len(nn2.cost_)), nn2.cost_) plt.ylabel('Cost') plt.xlabel('Epochs') plt.show() Iteration: 5/5 | Cost 0.11 | Elapsed: 00:00:00 | ETA: 00:00:00 Continue the training for 25 epochs... nn2.epochs = 25 nn2 = nn2.fit(X_std, y) Iteration: 25/25 | Cost 0.07 | Elapsed: 0:00:00 | ETA: 0:00:00 plt.plot(range(len(nn2.cost_)), nn2.cost_) plt.ylabel('Cost') plt.xlabel('Epochs') plt.show()","title":"Stochastic Gradient Descent"},{"location":"user_guide/classifier/MultiLayerPerceptron/#example-2-classifying-handwritten-digits-from-a-10-mnist-subset","text":"Load a 5000-sample subset of the MNIST dataset (please see data.loadlocal_mnist if you want to download and read in the complete MNIST dataset). from mlxtend.data import mnist_data from mlxtend.preprocessing import shuffle_arrays_unison X, y = mnist_data() X, y = shuffle_arrays_unison((X, y), random_seed=1) X_train, y_train = X[:500], y[:500] X_test, y_test = X[500:], y[500:] Visualize a sample from the MNIST dataset to check if it was loaded correctly: import matplotlib.pyplot as plt def plot_digit(X, y, idx): img = X[idx].reshape(28,28) plt.imshow(img, cmap='Greys', interpolation='nearest') plt.title('true label: %d' % y[idx]) plt.show() plot_digit(X, y, 3500) Standardize pixel values: import numpy as np from mlxtend.preprocessing import standardize X_train_std, params = standardize(X_train, columns=range(X_train.shape[1]), return_params=True) X_test_std = standardize(X_test, columns=range(X_test.shape[1]), params=params) Initialize the neural network to recognize the 10 different digits (0-9) using 100 epochs and mini-batch learning. nn1 = MLP(hidden_layers=[150], l2=0.00, l1=0.0, epochs=100, eta=0.005, momentum=0.0, decrease_const=0.0, minibatches=100, random_seed=1, print_progress=3) Learn the features while printing the progress to get an idea about how long it may take.
import matplotlib.pyplot as plt nn1.fit(X_train_std, y_train) plt.plot(range(len(nn1.cost_)), nn1.cost_) plt.ylabel('Cost') plt.xlabel('Epochs') plt.show() Iteration: 100/100 | Cost 0.01 | Elapsed: 0:00:17 | ETA: 0:00:00 print('Train Accuracy: %.2f%%' % (100 * nn1.score(X_train_std, y_train))) print('Test Accuracy: %.2f%%' % (100 * nn1.score(X_test_std, y_test))) Train Accuracy: 100.00% Test Accuracy: 84.62% Please note that this neural network has been trained on only 10% of the MNIST data for technical demonstration purposes, hence, the lousy predictive performance.","title":"Example 2 - Classifying Handwritten Digits from a 10% MNIST Subset"},{"location":"user_guide/classifier/MultiLayerPerceptron/#api","text":"MultiLayerPerceptron(eta=0.5, epochs=50, hidden_layers=[50], n_classes=None, momentum=0.0, l1=0.0, l2=0.0, dropout=1.0, decrease_const=0.0, minibatches=1, random_seed=None, print_progress=0) Multi-layer perceptron classifier with logistic sigmoid activations Parameters eta : float (default: 0.5) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. hidden_layers : list (default: [50]) Number of units per hidden layer. By default 50 units in the first hidden layer. At the moment only 1 hidden layer is supported n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. l1 : float (default: 0.0) L1 regularization strength l2 : float (default: 0.0) L2 regularization strength momentum : float (default: 0.0) Momentum constant. Factor multiplied with the gradient of the previous epoch t-1 to improve learning speed w(t) := w(t) - (grad(t) + momentum * grad(t-1)) decrease_const : float (default: 0.0) Decrease constant. Shrinks the learning rate after each epoch via eta / (1 + epoch*decrease_const) minibatches : int (default: 1) Divide the training data into k minibatches for accelerated stochastic gradient descent learning. Gradient Descent Learning if minibatches = 1 Stochastic Gradient Descent learning if minibatches = len(y) Minibatch learning if minibatches > 1 random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape=[n_features, n_classes] Weights after fitting. b_ : 1D-array, shape=[n_classes] Bias units after fitting. cost_ : list List of floats; the mean categorical cross entropy cost after each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/MultiLayerPerceptron/","title":"API"},{"location":"user_guide/classifier/MultiLayerPerceptron/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. 
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilties : array-like, shape= [n_samples, n_classes] score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Methods"},{"location":"user_guide/classifier/Perceptron/","text":"Perceptron Implementation of a Perceptron learning algorithm for classification. from mlxtend.classifier import Perceptron Overview The idea behind this \"thresholded\" perceptron was to mimic how a single neuron in the brain works: It either \"fires\" or not. A perceptron receives multiple input signals, and if the sum of the input signals exceed a certain threshold it either returns a signal or remains \"silent\" otherwise. What made this a \"machine learning\" algorithm was Frank Rosenblatt's idea of the perceptron learning rule: The perceptron algorithm is about learning the weights for the input signals in order to draw linear decision boundary that allows us to discriminate between the two linearly separable classes +1 and -1. Basic Notation Before we dive deeper into the algorithm(s) for learning the weights of the perceptron classifier, let us take a brief look at the basic notation. In the following sections, we will label the positive and negative class in our binary classification setting as \"1\" and \"-1\", respectively. Next, we define an activation function g(\\mathbf{z}) that takes a linear combination of the input values \\mathbf{x} and weights \\mathbf{w} as input ( \\mathbf{z} = w_1x_{1} + \\dots + w_mx_{m} ), and if g(\\mathbf{z}) is greater than a defined threshold \\theta we predict 1 and -1 otherwise; in this case, this activation function g is a simple \"unit step function,\" which is sometimes also called \"Heaviside step function.\" $$ g(z) = \\begin{cases} 1 & \\text{if $z \\ge \\theta$}\\\\ -1 & \\text{otherwise}. \\end{cases} $$ where z = w_1x_{1} + \\dots + w_mx_{m} = \\sum_{j=1}^{m} x_{j}w_{j} \\\\ = \\mathbf{w}^T\\mathbf{x} \\mathbf{w} is the feature vector, and \\mathbf{x} is an m -dimensional sample from the training dataset: \\mathbf{w} = \\begin{bmatrix} w_{1} \\\\ \\vdots \\\\ w_{m} \\end{bmatrix} \\quad \\mathbf{x} = \\begin{bmatrix} x_{1} \\\\ \\vdots \\\\ x_{m} \\end{bmatrix} In order to simplify the notation, we bring \\theta to the left side of the equation and define w_0 = -\\theta \\text{ and } x_0=1 so that $$ g({z}) = \\begin{cases} 1 & \\text{if $z \\ge 0$}\\\\ -1 & \\text{otherwise}. \\end{cases} $$ and z = w_0x_{0} + w_1x_{1} + \\dots + w_mx_{m} = \\sum_{j=0}^{m} x_{j}w_{j} \\\\ = \\mathbf{w}^T\\mathbf{x}. Perceptron Rule Rosenblatt's initial perceptron rule is fairly simple and can be summarized by the following steps: Initialize the weights to 0 or small random numbers. 
For each training sample \\mathbf{x^{(i)}} : Calculate the output value. Update the weights. The output value is the class label predicted by the unit step function that we defined earlier (output =g(\\mathbf{z}) ) and the weight update can be written more formally as w_j := w_j + \\Delta w_j . The value for updating the weights at each increment is calculated by the learning rule \\Delta w_j = \\eta \\; (\\text{target}^{(i)} - \\text{output}^{(i)})\\;x^{(i)}_{j} where \\eta is the learning rate (a constant between 0.0 and 1.0), \"target\" is the true class label, and the \"output\" is the predicted class label. It is important to note that all weights in the weight vector are being updated simultaneously. Concretely, for a 2-dimensional dataset, we would write the update as: \\Delta w_0 = \\eta(\\text{target}^{(i)} - \\text{output}^{(i)}) \\Delta w_1 = \\eta(\\text{target}^{(i)} - \\text{output}^{(i)})\\;x^{(i)}_{1} \\Delta w_2 = \\eta(\\text{target}^{(i)} - \\text{output}^{(i)})\\;x^{(i)}_{2} Before we implement the perceptron rule in Python, let us make a simple thought experiment to illustrate how beautifully simple this learning rule really is. In the two scenarios where the perceptron predicts the class label correctly, the weights remain unchanged: \\Delta w_j = \\eta(-1^{(i)} - -1^{(i)})\\;x^{(i)}_{j} = 0 \\Delta w_j = \\eta(1^{(i)} - 1^{(i)})\\;x^{(i)}_{j} = 0 However, in case of a wrong prediction, the weights are being \"pushed\" towards the direction of the positive or negative target class, respectively: \\Delta w_j = \\eta(1^{(i)} - -1^{(i)})\\;x^{(i)}_{j} = \\eta(2)\\;x^{(i)}_{j} \\Delta w_j = \\eta(-1^{(i)} - 1^{(i)})\\;x^{(i)}_{j} = \\eta(-2)\\;x^{(i)}_{j} It is important to note that the convergence of the perceptron is only guaranteed if the two classes are linearly separable. If the two classes can't be separated by a linear decision boundary, we can set a maximum number of passes over the training dataset (\"epochs\") and/or a threshold for the number of tolerated misclassifications. References F. Rosenblatt. The perceptron, a perceiving and recognizing automaton Project Para. Cornell Aeronautical Laboratory, 1957. Example 1 - Classification of Iris Flowers from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Perceptron import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() # Rosenblatt Perceptron ppn = Perceptron(epochs=5, eta=0.05, random_seed=0, print_progress=3) ppn.fit(X, y) plot_decision_regions(X, y, clf=ppn) plt.title('Perceptron - Rosenblatt Perceptron Rule') plt.show() print('Bias & Weights: %s' % ppn.w_) plt.plot(range(len(ppn.cost_)), ppn.cost_) plt.xlabel('Iterations') plt.ylabel('Misclassifications') plt.show() Iteration: 5/5 | Elapsed: 00:00:00 | ETA: 00:00:00 Bias & Weights: [[-0.04500809] [ 0.11048855]] API Perceptron(eta=0.1, epochs=50, random_seed=None, print_progress=0) Perceptron classifier. Note that this implementation of the Perceptron expects binary class labels in {0, 1}. Parameters eta : float (default: 0.1) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Number of passes over the training dataset. Prior to each epoch, the dataset is shuffled to prevent cycles.
random_seed : int Random state for initializing random weights and shuffling. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Number of misclassifications in every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Perceptron/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Perceptron"},{"location":"user_guide/classifier/Perceptron/#perceptron","text":"Implementation of a Perceptron learning algorithm for classification. from mlxtend.classifier import Perceptron","title":"Perceptron"},{"location":"user_guide/classifier/Perceptron/#overview","text":"The idea behind this \"thresholded\" perceptron was to mimic how a single neuron in the brain works: It either \"fires\" or not. A perceptron receives multiple input signals, and if the sum of the input signals exceed a certain threshold it either returns a signal or remains \"silent\" otherwise. What made this a \"machine learning\" algorithm was Frank Rosenblatt's idea of the perceptron learning rule: The perceptron algorithm is about learning the weights for the input signals in order to draw linear decision boundary that allows us to discriminate between the two linearly separable classes +1 and -1.","title":"Overview"},{"location":"user_guide/classifier/Perceptron/#basic-notation","text":"Before we dive deeper into the algorithm(s) for learning the weights of the perceptron classifier, let us take a brief look at the basic notation. In the following sections, we will label the positive and negative class in our binary classification setting as \"1\" and \"-1\", respectively. Next, we define an activation function g(\\mathbf{z}) that takes a linear combination of the input values \\mathbf{x} and weights \\mathbf{w} as input ( \\mathbf{z} = w_1x_{1} + \\dots + w_mx_{m} ), and if g(\\mathbf{z}) is greater than a defined threshold \\theta we predict 1 and -1 otherwise; in this case, this activation function g is a simple \"unit step function,\" which is sometimes also called \"Heaviside step function.\" $$ g(z) = \\begin{cases} 1 & \\text{if $z \\ge \\theta$}\\\\ -1 & \\text{otherwise}. 
\\end{cases} $$ where z = w_1x_{1} + \\dots + w_mx_{m} = \\sum_{j=1}^{m} x_{j}w_{j} \\\\ = \\mathbf{w}^T\\mathbf{x} \\mathbf{w} is the weight vector, and \\mathbf{x} is an m -dimensional sample from the training dataset: \\mathbf{w} = \\begin{bmatrix} w_{1} \\\\ \\vdots \\\\ w_{m} \\end{bmatrix} \\quad \\mathbf{x} = \\begin{bmatrix} x_{1} \\\\ \\vdots \\\\ x_{m} \\end{bmatrix} In order to simplify the notation, we bring \\theta to the left side of the equation and define w_0 = -\\theta \\text{ and } x_0=1 so that $$ g({z}) = \\begin{cases} 1 & \\text{if $z \\ge 0$}\\\\ -1 & \\text{otherwise}. \\end{cases} $$ and z = w_0x_{0} + w_1x_{1} + \\dots + w_mx_{m} = \\sum_{j=0}^{m} x_{j}w_{j} \\\\ = \\mathbf{w}^T\\mathbf{x}.","title":"Basic Notation"},{"location":"user_guide/classifier/Perceptron/#perceptron-rule","text":"Rosenblatt's initial perceptron rule is fairly simple and can be summarized by the following steps: Initialize the weights to 0 or small random numbers. For each training sample \\mathbf{x^{(i)}} : Calculate the output value. Update the weights. The output value is the class label predicted by the unit step function that we defined earlier (output =g(\\mathbf{z}) ) and the weight update can be written more formally as w_j := w_j + \\Delta w_j . The value for updating the weights at each increment is calculated by the learning rule \\Delta w_j = \\eta \\; (\\text{target}^{(i)} - \\text{output}^{(i)})\\;x^{(i)}_{j} where \\eta is the learning rate (a constant between 0.0 and 1.0), \"target\" is the true class label, and the \"output\" is the predicted class label. It is important to note that all weights in the weight vector are being updated simultaneously. Concretely, for a 2-dimensional dataset, we would write the update as: \\Delta w_0 = \\eta(\\text{target}^{(i)} - \\text{output}^{(i)}) \\Delta w_1 = \\eta(\\text{target}^{(i)} - \\text{output}^{(i)})\\;x^{(i)}_{1} \\Delta w_2 = \\eta(\\text{target}^{(i)} - \\text{output}^{(i)})\\;x^{(i)}_{2} Before we implement the perceptron rule in Python, let us make a simple thought experiment to illustrate how beautifully simple this learning rule really is. In the two scenarios where the perceptron predicts the class label correctly, the weights remain unchanged: \\Delta w_j = \\eta(-1^{(i)} - -1^{(i)})\\;x^{(i)}_{j} = 0 \\Delta w_j = \\eta(1^{(i)} - 1^{(i)})\\;x^{(i)}_{j} = 0 However, in case of a wrong prediction, the weights are being \"pushed\" towards the direction of the positive or negative target class, respectively: \\Delta w_j = \\eta(1^{(i)} - -1^{(i)})\\;x^{(i)}_{j} = \\eta(2)\\;x^{(i)}_{j} \\Delta w_j = \\eta(-1^{(i)} - 1^{(i)})\\;x^{(i)}_{j} = \\eta(-2)\\;x^{(i)}_{j} It is important to note that the convergence of the perceptron is only guaranteed if the two classes are linearly separable. If the two classes can't be separated by a linear decision boundary, we can set a maximum number of passes over the training dataset (\"epochs\") and/or a threshold for the number of tolerated misclassifications.","title":"Perceptron Rule"},{"location":"user_guide/classifier/Perceptron/#references","text":"F. Rosenblatt. The perceptron, a perceiving and recognizing automaton Project Para.
Cornell Aeronautical Laboratory, 1957.","title":"References"},{"location":"user_guide/classifier/Perceptron/#example-1-classification-of-iris-flowers","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import Perceptron import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width X = X[0:100] # class 0 and class 1 y = y[0:100] # class 0 and class 1 # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() # Rosenblatt Perceptron ppn = Perceptron(epochs=5, eta=0.05, random_seed=0, print_progress=3) ppn.fit(X, y) plot_decision_regions(X, y, clf=ppn) plt.title('Perceptron - Rosenblatt Perceptron Rule') plt.show() print('Bias & Weights: %s' % ppn.w_) plt.plot(range(len(ppn.cost_)), ppn.cost_) plt.xlabel('Iterations') plt.ylabel('Misclassifications') plt.show() Iteration: 5/5 | Elapsed: 00:00:00 | ETA: 00:00:00 Bias & Weights: [[-0.04500809] [ 0.11048855]]","title":"Example 1 - Classification of Iris Flowers"},{"location":"user_guide/classifier/Perceptron/#api","text":"Perceptron(eta=0.1, epochs=50, random_seed=None, print_progress=0) Perceptron classifier. Note that this implementation of the Perceptron expects binary class labels in {0, 1}. Parameters eta : float (default: 0.1) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Number of passes over the training dataset. Prior to each epoch, the dataset is shuffled to prevent cycles. random_seed : int Random state for initializing random weights and shuffling. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Number of misclassifications in every epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/Perceptron/","title":"API"},{"location":"user_guide/classifier/Perceptron/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Methods"},{"location":"user_guide/classifier/SoftmaxRegression/","text":"Softmax Regression A logistic regression class for multi-class classification tasks.
from mlxtend.classifier import SoftmaxRegression Overview Softmax Regression (synonyms: Multinomial Logistic , Maximum Entropy Classifier , or just Multi-class Logistic Regression ) is a generalization of logistic regression that we can use for multi-class classification (under the assumption that the classes are mutually exclusive). In contrast, we use the (standard) Logistic Regression model in binary classification tasks. Below is a schematic of a Logistic Regression model; for more details, please see the LogisticRegression manual . In Softmax Regression (SMR), we replace the sigmoid logistic function with the so-called softmax function \\phi_{softmax}(\\cdot) . P(y=j \\mid z^{(i)}) = \\phi_{softmax}(z^{(i)}) = \\frac{e^{z_j^{(i)}}}{\\sum_{l=1}^{k} e^{z_l^{(i)}}}, where we define the net input z as z = w_1x_1 + ... + w_mx_m + b= \\sum_{l=1}^{m} w_l x_l + b= \\mathbf{w}^T\\mathbf{x} + b. ( w is the weight vector, \\mathbf{x} is the feature vector of 1 training sample, and b is the bias unit.) Now, this softmax function computes the probability that this training sample \\mathbf{x}^{(i)} belongs to class j given the weight and net input z^{(i)} . So, we compute the probability p(y = j \\mid \\mathbf{x^{(i)}; w}_j) for each class label j = 1, \\ldots, k . Note the normalization term in the denominator which causes these class probabilities to sum up to one. To illustrate the concept of softmax, let us walk through a concrete example. Let's assume we have a training set consisting of 4 samples from 3 different classes (0, 1, and 2) x_0 \\rightarrow \\text{class }0 x_1 \\rightarrow \\text{class }1 x_2 \\rightarrow \\text{class }2 x_3 \\rightarrow \\text{class }2 import numpy as np y = np.array([0, 1, 2, 2]) First, we want to encode the class labels into a format that we can more easily work with; we apply one-hot encoding: y_enc = (np.arange(np.max(y) + 1) == y[:, None]).astype(float) print('one-hot encoding:\\n', y_enc) one-hot encoding: [[ 1. 0. 0.] [ 0. 1. 0.] [ 0. 0. 1.] [ 0. 0. 1.]] A sample that belongs to class 0 (the first row) has a 1 in the first cell, a sample that belongs to class 2 has a 1 in the third cell of its row, and so forth. Next, let us define the feature matrix of our 4 training samples. Here, we assume that our dataset consists of 2 features; thus, we create a 4x2 dimensional matrix of our samples and features. Similarly, we create a 2x3 dimensional weight matrix (one row per feature and one column for each class). X = np.array([[0.1, 0.5], [1.1, 2.3], [-1.1, -2.3], [-1.5, -2.5]]) W = np.array([[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]) bias = np.array([0.01, 0.1, 0.1]) print('Inputs X:\\n', X) print('\\nWeights W:\\n', W) print('\\nbias:\\n', bias) Inputs X: [[ 0.1 0.5] [ 1.1 2.3] [-1.1 -2.3] [-1.5 -2.5]] Weights W: [[ 0.1 0.2 0.3] [ 0.1 0.2 0.3]] bias: [ 0.01 0.1 0.1 ] To compute the net input, we multiply the 4x2 feature matrix X with the 2x3 (n_features x n_classes) weight matrix W , which yields a 4x3 output matrix (n_samples x n_classes) to which we then add the bias unit: \\mathbf{Z} = \\mathbf{X}\\mathbf{W} + \\mathbf{b}.
X = np.array([[0.1, 0.5], [1.1, 2.3], [-1.1, -2.3], [-1.5, -2.5]]) W = np.array([[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]) bias = np.array([0.01, 0.1, 0.1]) print('Inputs X:\\n', X) print('\\nWeights W:\\n', W) print('\\nbias:\\n', bias) Inputs X: [[ 0.1 0.5] [ 1.1 2.3] [-1.1 -2.3] [-1.5 -2.5]] Weights W: [[ 0.1 0.2 0.3] [ 0.1 0.2 0.3]] bias: [ 0.01 0.1 0.1 ] def net_input(X, W, b): return (X.dot(W) + b) net_in = net_input(X, W, bias) print('net input:\\n', net_in) net input: [[ 0.07 0.22 0.28] [ 0.35 0.78 1.12] [-0.33 -0.58 -0.92] [-0.39 -0.7 -1.1 ]] Now, it's time to compute the softmax activation that we discussed earlier: P(y=j \\mid z^{(i)}) = \\phi_{softmax}(z^{(i)}) = \\frac{e^{z^{(i)}}}{\\sum_{j=0}^{k} e^{z_{k}^{(i)}}}. def softmax(z): return (np.exp(z.T) / np.sum(np.exp(z), axis=1)).T smax = softmax(net_in) print('softmax:\\n', smax) softmax: [[ 0.29450637 0.34216758 0.36332605] [ 0.21290077 0.32728332 0.45981591] [ 0.42860913 0.33380113 0.23758974] [ 0.44941979 0.32962558 0.22095463]] As we can see, the values for each sample (row) nicely sum up to 1 now. E.g., we can say that the first sample [ 0.29450637 0.34216758 0.36332605] has a 29.45% probability to belong to class 0. Now, in order to turn these probabilities back into class labels, we could simply take the argmax-index position of each row: [[ 0.29450637 0.34216758 0.36332605 ] -> 2 [ 0.21290077 0.32728332 0.45981591 ] -> 2 [ 0.42860913 0.33380113 0.23758974] -> 0 [ 0.44941979 0.32962558 0.22095463]] -> 0 def to_classlabel(z): return z.argmax(axis=1) print('predicted class labels: ', to_classlabel(smax)) predicted class labels: [2 2 0 0] As we can see, our predictions are terribly wrong, since the correct class labels are [0, 1, 2, 2] . Now, in order to train our logistic model (e.g., via an optimization algorithm such as gradient descent), we need to define a cost function J(\\cdot) that we want to minimize: J(\\mathbf{W}; \\mathbf{b}) = \\frac{1}{n} \\sum_{i=1}^{n} H(T_i, O_i), which is the average of all cross-entropies over our n training samples. The cross-entropy function is defined as H(T_i, O_i) = -\\sum_m T_i \\cdot log(O_i). Here the T stands for \"target\" (i.e., the true class labels) and the O stands for output -- the computed probability via softmax; not the predicted class label. def cross_entropy(output, y_target): return - np.sum(np.log(output) * (y_target), axis=1) xent = cross_entropy(smax, y_enc) print('Cross Entropy:', xent) Cross Entropy: [ 1.22245465 1.11692907 1.43720989 1.50979788] def cost(output, y_target): return np.mean(cross_entropy(output, y_target)) J_cost = cost(smax, y_enc) print('Cost: ', J_cost) Cost: 1.32159787159 In order to learn our softmax model -- determining the weight coefficients -- via gradient descent, we then need to compute the derivative \\nabla \\mathbf{w}_j \\, J(\\mathbf{W}; \\mathbf{b}). 
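As shown just below, this derivative has a simple closed form, (1/n) \\sum_i \\mathbf{x}^{(i)}(O_i - T_i). Evaluating it on the toy example gives a quick numerical sanity check; this sketch assumes the X, smax, and y_enc arrays from the preceding cells:

```python
import numpy as np

# Gradient of the cross-entropy cost on the toy example; assumes `X` (4x2),
# `smax` (4x3), and `y_enc` (4x3) from the preceding cells.
grad_W = X.T.dot(smax - y_enc) / X.shape[0]   # shape (2, 3), matches W
grad_b = np.mean(smax - y_enc, axis=0)        # shape (3,), matches bias
print('dJ/dW:\n', grad_W)
print('dJ/db:', grad_b)
```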
I don't want to walk through the tedious details here, but this cost derivative turns out to be simply: \\nabla \\mathbf{w}_j \\, J(\\mathbf{W}; \\mathbf{b}) = \\frac{1}{n} \\sum^{n}_{i=1} \\big[\\mathbf{x}^{(i)}\\ \\big(O_i - T_i \\big) \\big] We can then use the cost derivative to update the weights in the opposite direction of the cost gradient with learning rate \\eta : \\mathbf{w}_j := \\mathbf{w}_j - \\eta \\nabla \\mathbf{w}_j \\, J(\\mathbf{W}; \\mathbf{b}) for each class j \\in \\{0, 1, ..., k\\} (note that \\mathbf{w}_j is the weight vector for the class y=j ), and we update the bias units \\mathbf{b}_j := \\mathbf{b}_j - \\eta \\bigg[ \\frac{1}{n} \\sum^{n}_{i=1} \\big(O_i - T_i \\big) \\bigg]. As a penalty against complexity, an approach to reduce the variance of our model and decrease the degree of overfitting by adding additional bias, we can further add a regularization term such as the L2 term with the regularization parameter \\lambda : L2: \\frac{\\lambda}{2} ||\\mathbf{w}||_{2}^{2} , where ||\\mathbf{w}||_{2}^{2} = \\sum^{m}_{l=0} \\sum^{k}_{j=0} w_{l, j}^2 so that our cost function becomes J(\\mathbf{W}; \\mathbf{b}) = \\frac{1}{n} \\sum_{i=1}^{n} H(T_i, O_i) + \\frac{\\lambda}{2} ||\\mathbf{w}||_{2}^{2} and we define the \"regularized\" weight update as \\mathbf{w}_j := \\mathbf{w}_j - \\eta \\big[\\nabla \\mathbf{w}_j \\, J(\\mathbf{W}) + \\lambda \\mathbf{w}_j \\big]. (Please note that we don't regularize the bias term.) Example 1 - Gradient Descent from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import SoftmaxRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = SoftmaxRegression(eta=0.01, epochs=500, minibatches=1, random_seed=1, print_progress=3) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Softmax Regression - Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 500/500 | Cost 0.06 | Elapsed: 0:00:00 | ETA: 0:00:00 Predicting Class Labels y_pred = lr.predict(X) print('Last 3 Class Labels: %s' % y_pred[-3:]) Last 3 Class Labels: [2 2 2] Predicting Class Probabilities y_pred = lr.predict_proba(X) print('Last 3 Class Probabilities:\\n %s' % y_pred[-3:]) Last 3 Class Probabilities: [[ 9.18728149e-09 1.68894679e-02 9.83110523e-01] [ 2.97052325e-11 7.26356627e-04 9.99273643e-01] [ 1.57464093e-06 1.57779528e-01 8.42218897e-01]] Example 2 - Stochastic Gradient Descent from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import SoftmaxRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = SoftmaxRegression(eta=0.01, epochs=300, minibatches=len(y), random_seed=1) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Softmax Regression - Stochastic Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() API SoftmaxRegression(eta=0.01, epochs=50, l2=0.0, minibatches=1, n_classes=None, random_seed=None, print_progress=0) Softmax regression classifier.
Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2 : float Regularization parameter for L2 regularization. No regularization if l2=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats, the average cross_entropy for each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/SoftmaxRegression/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilties : array-like, shape= [n_samples, n_classes] score(X, y) Compute the prediction accuracy Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Softmax Regression"},{"location":"user_guide/classifier/SoftmaxRegression/#softmax-regression","text":"A logistic regression class for multi-class classification tasks. from mlxtend.classifier import SoftmaxRegression","title":"Softmax Regression"},{"location":"user_guide/classifier/SoftmaxRegression/#overview","text":"Softmax Regression (synonyms: Multinomial Logistic , Maximum Entropy Classifier , or just Multi-class Logistic Regression ) is a generalization of logistic regression that we can use for multi-class classification (under the assumption that the classes are mutually exclusive). In contrast, we use the (standard) Logistic Regression model in binary classification tasks. 
Below is a schematic of a Logistic Regression model; for more details, please see the LogisticRegression manual . In Softmax Regression (SMR), we replace the sigmoid logistic function with the so-called softmax function \\phi_{softmax}(\\cdot) . P(y=j \\mid z^{(i)}) = \\phi_{softmax}(z^{(i)}) = \\frac{e^{z_j^{(i)}}}{\\sum_{l=1}^{k} e^{z_l^{(i)}}}, where we define the net input z as z = w_1x_1 + ... + w_mx_m + b= \\sum_{l=1}^{m} w_l x_l + b= \\mathbf{w}^T\\mathbf{x} + b. ( w is the weight vector, \\mathbf{x} is the feature vector of 1 training sample, and b is the bias unit.) Now, this softmax function computes the probability that this training sample \\mathbf{x}^{(i)} belongs to class j given the weight and net input z^{(i)} . So, we compute the probability p(y = j \\mid \\mathbf{x^{(i)}; w}_j) for each class label j = 1, \\ldots, k . Note the normalization term in the denominator which causes these class probabilities to sum up to one. To illustrate the concept of softmax, let us walk through a concrete example. Let's assume we have a training set consisting of 4 samples from 3 different classes (0, 1, and 2) x_0 \\rightarrow \\text{class }0 x_1 \\rightarrow \\text{class }1 x_2 \\rightarrow \\text{class }2 x_3 \\rightarrow \\text{class }2 import numpy as np y = np.array([0, 1, 2, 2]) First, we want to encode the class labels into a format that we can more easily work with; we apply one-hot encoding: y_enc = (np.arange(np.max(y) + 1) == y[:, None]).astype(float) print('one-hot encoding:\\n', y_enc) one-hot encoding: [[ 1. 0. 0.] [ 0. 1. 0.] [ 0. 0. 1.] [ 0. 0. 1.]] A sample that belongs to class 0 (the first row) has a 1 in the first cell, a sample that belongs to class 2 has a 1 in the third cell of its row, and so forth. Next, let us define the feature matrix of our 4 training samples. Here, we assume that our dataset consists of 2 features; thus, we create a 4x2 dimensional matrix of our samples and features. Similarly, we create a 2x3 dimensional weight matrix (one row per feature and one column for each class). X = np.array([[0.1, 0.5], [1.1, 2.3], [-1.1, -2.3], [-1.5, -2.5]]) W = np.array([[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]) bias = np.array([0.01, 0.1, 0.1]) print('Inputs X:\\n', X) print('\\nWeights W:\\n', W) print('\\nbias:\\n', bias) Inputs X: [[ 0.1 0.5] [ 1.1 2.3] [-1.1 -2.3] [-1.5 -2.5]] Weights W: [[ 0.1 0.2 0.3] [ 0.1 0.2 0.3]] bias: [ 0.01 0.1 0.1 ] To compute the net input, we multiply the 4x2 feature matrix X with the 2x3 (n_features x n_classes) weight matrix W , which yields a 4x3 output matrix (n_samples x n_classes) to which we then add the bias unit: \\mathbf{Z} = \\mathbf{X}\\mathbf{W} + \\mathbf{b}. X = np.array([[0.1, 0.5], [1.1, 2.3], [-1.1, -2.3], [-1.5, -2.5]]) W = np.array([[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]) bias = np.array([0.01, 0.1, 0.1]) print('Inputs X:\\n', X) print('\\nWeights W:\\n', W) print('\\nbias:\\n', bias) Inputs X: [[ 0.1 0.5] [ 1.1 2.3] [-1.1 -2.3] [-1.5 -2.5]] Weights W: [[ 0.1 0.2 0.3] [ 0.1 0.2 0.3]] bias: [ 0.01 0.1 0.1 ] def net_input(X, W, b): return (X.dot(W) + b) net_in = net_input(X, W, bias) print('net input:\\n', net_in) net input: [[ 0.07 0.22 0.28] [ 0.35 0.78 1.12] [-0.33 -0.58 -0.92] [-0.39 -0.7 -1.1 ]] Now, it's time to compute the softmax activation that we discussed earlier: P(y=j \\mid z^{(i)}) = \\phi_{softmax}(z^{(i)}) = \\frac{e^{z_j^{(i)}}}{\\sum_{l=1}^{k} e^{z_l^{(i)}}}.
def softmax(z): return (np.exp(z.T) / np.sum(np.exp(z), axis=1)).T smax = softmax(net_in) print('softmax:\\n', smax) softmax: [[ 0.29450637 0.34216758 0.36332605] [ 0.21290077 0.32728332 0.45981591] [ 0.42860913 0.33380113 0.23758974] [ 0.44941979 0.32962558 0.22095463]] As we can see, the values for each sample (row) nicely sum up to 1 now. E.g., we can say that the first sample [ 0.29450637 0.34216758 0.36332605] has a 29.45% probability to belong to class 0. Now, in order to turn these probabilities back into class labels, we could simply take the argmax-index position of each row: [[ 0.29450637 0.34216758 0.36332605 ] -> 2 [ 0.21290077 0.32728332 0.45981591 ] -> 2 [ 0.42860913 0.33380113 0.23758974] -> 0 [ 0.44941979 0.32962558 0.22095463]] -> 0 def to_classlabel(z): return z.argmax(axis=1) print('predicted class labels: ', to_classlabel(smax)) predicted class labels: [2 2 0 0] As we can see, our predictions are terribly wrong, since the correct class labels are [0, 1, 2, 2] . Now, in order to train our logistic model (e.g., via an optimization algorithm such as gradient descent), we need to define a cost function J(\\cdot) that we want to minimize: J(\\mathbf{W}; \\mathbf{b}) = \\frac{1}{n} \\sum_{i=1}^{n} H(T_i, O_i), which is the average of all cross-entropies over our n training samples. The cross-entropy function is defined as H(T_i, O_i) = -\\sum_m T_i \\cdot log(O_i). Here the T stands for \"target\" (i.e., the true class labels) and the O stands for output -- the computed probability via softmax; not the predicted class label. def cross_entropy(output, y_target): return - np.sum(np.log(output) * (y_target), axis=1) xent = cross_entropy(smax, y_enc) print('Cross Entropy:', xent) Cross Entropy: [ 1.22245465 1.11692907 1.43720989 1.50979788] def cost(output, y_target): return np.mean(cross_entropy(output, y_target)) J_cost = cost(smax, y_enc) print('Cost: ', J_cost) Cost: 1.32159787159 In order to learn our softmax model -- determining the weight coefficients -- via gradient descent, we then need to compute the derivative \\nabla \\mathbf{w}_j \\, J(\\mathbf{W}; \\mathbf{b}). I don't want to walk through the tedious details here, but this cost derivative turns out to be simply: \\nabla \\mathbf{w}_j \\, J(\\mathbf{W}; \\mathbf{b}) = \\frac{1}{n} \\sum^{n}_{i=1} \\big[\\mathbf{x}^{(i)}\\ \\big(O_i - T_i \\big) \\big] We can then use the cost derivative to update the weights in the opposite direction of the cost gradient with learning rate \\eta : \\mathbf{w}_j := \\mathbf{w}_j - \\eta \\nabla \\mathbf{w}_j \\, J(\\mathbf{W}; \\mathbf{b}) for each class j \\in \\{0, 1, ..., k\\} (note that \\mathbf{w}_j is the weight vector for the class y=j ), and we update the bias units \\mathbf{b}_j := \\mathbf{b}_j - \\eta \\bigg[ \\frac{1}{n} \\sum^{n}_{i=1} \\big(O_i - T_i \\big) \\bigg]. As a penalty against complexity, an approach to reduce the variance of our model and decrease the degree of overfitting by adding additional bias, we can further add a regularization term such as the L2 term with the regularization parameter \\lambda : L2: \\frac{\\lambda}{2} ||\\mathbf{w}||_{2}^{2} , where ||\\mathbf{w}||_{2}^{2} = \\sum^{m}_{l=0} \\sum^{k}_{j=0} w_{l, j}^2 so that our cost function becomes J(\\mathbf{W}; \\mathbf{b}) = \\frac{1}{n} \\sum_{i=1}^{n} H(T_i, O_i) + \\frac{\\lambda}{2} ||\\mathbf{w}||_{2}^{2} and we define the \"regularized\" weight update as \\mathbf{w}_j := \\mathbf{w}_j - \\eta \\big[\\nabla \\mathbf{w}_j \\, J(\\mathbf{W}) + \\lambda \\mathbf{w}_j \\big].
(Please note that we don't regularize the bias term.)","title":"Overview"},{"location":"user_guide/classifier/SoftmaxRegression/#example-1-gradient-descent","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import SoftmaxRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = SoftmaxRegression(eta=0.01, epochs=500, minibatches=1, random_seed=1, print_progress=3) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Softmax Regression - Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show() Iteration: 500/500 | Cost 0.06 | Elapsed: 0:00:00 | ETA: 0:00:00","title":"Example 1 - Gradient Descent"},{"location":"user_guide/classifier/SoftmaxRegression/#predicting-class-labels","text":"y_pred = lr.predict(X) print('Last 3 Class Labels: %s' % y_pred[-3:]) Last 3 Class Labels: [2 2 2]","title":"Predicting Class Labels"},{"location":"user_guide/classifier/SoftmaxRegression/#predicting-class-probabilities","text":"y_pred = lr.predict_proba(X) print('Last 3 Class Labels:\\n %s' % y_pred[-3:]) Last 3 Class Labels: [[ 9.18728149e-09 1.68894679e-02 9.83110523e-01] [ 2.97052325e-11 7.26356627e-04 9.99273643e-01] [ 1.57464093e-06 1.57779528e-01 8.42218897e-01]]","title":"Predicting Class Probabilities"},{"location":"user_guide/classifier/SoftmaxRegression/#example-2-stochastic-gradient-descent","text":"from mlxtend.data import iris_data from mlxtend.plotting import plot_decision_regions from mlxtend.classifier import SoftmaxRegression import matplotlib.pyplot as plt # Loading Data X, y = iris_data() X = X[:, [0, 3]] # sepal length and petal width # standardize X[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std() X[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std() lr = SoftmaxRegression(eta=0.01, epochs=300, minibatches=len(y), random_seed=1) lr.fit(X, y) plot_decision_regions(X, y, clf=lr) plt.title('Softmax Regression - Stochastic Gradient Descent') plt.show() plt.plot(range(len(lr.cost_)), lr.cost_) plt.xlabel('Iterations') plt.ylabel('Cost') plt.show()","title":"Example 2 - Stochastic Gradient Descent"},{"location":"user_guide/classifier/SoftmaxRegression/#api","text":"SoftmaxRegression(eta=0.01, epochs=50, l2=0.0, minibatches=1, n_classes=None, random_seed=None, print_progress=0) Softmax regression classifier. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. l2 : float Regularization parameter for L2 regularization. No regularization if l2=0.0. minibatches : int (default: 1) The number of minibatches for gradient-based optimization. If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent (SGD) online learning If 1 < minibatches < len(y): SGD Minibatch learning n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr. 
0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list List of floats, the average cross_entropy for each epoch. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/SoftmaxRegression/","title":"API"},{"location":"user_guide/classifier/SoftmaxRegression/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. predict_proba(X) Predict class probabilities of X from the net input. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns Class probabilities : array-like, shape= [n_samples, n_classes] score(X, y) Compute the prediction accuracy. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (true class labels). Returns acc : float The prediction accuracy as a float between 0.0 and 1.0 (perfect score).","title":"Methods"},{"location":"user_guide/classifier/StackingCVClassifier/","text":"StackingCVClassifier An ensemble-learning meta-classifier for stacking using cross-validation to prepare the inputs for the level-2 classifier to prevent overfitting. from mlxtend.classifier import StackingCVClassifier Overview Stacking is an ensemble learning technique to combine multiple classification models via a meta-classifier. The StackingCVClassifier extends the standard stacking algorithm (implemented as StackingClassifier ) using cross-validation to prepare the input data for the level-2 classifier. In the standard stacking procedure, the first-level classifiers are fit to the same training set that is used to prepare the inputs for the second-level classifier, which may lead to overfitting. The StackingCVClassifier , however, uses the concept of cross-validation: the dataset is split into k folds, and in k successive rounds, k-1 folds are used to fit the first-level classifiers; in each round, the first-level classifiers are then applied to the remaining subset that was not used for model fitting in that iteration. The resulting predictions are then stacked and provided -- as input data -- to the second-level classifier. After the training of the StackingCVClassifier , the first-level classifiers are fit to the entire dataset as illustrated in the figure below. More formally, the Stacking Cross-Validation algorithm can be summarized as follows (source: [1]): References [1] Tang, J., S. Alelyani, and H. Liu. \" Data Classification: Algorithms and Applications.
\" Data Mining and Knowledge Discovery Series, CRC Press (2015): pp. 498-500. [2] Wolpert, David H. \" Stacked generalization. \" Neural networks 5.2 (1992): 241-259. Example 1 - Simple Stacking CV Classification from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target from sklearn import model_selection from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import StackingCVClassifier import numpy as np RANDOM_SEED = 42 clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=RANDOM_SEED) clf3 = GaussianNB() lr = LogisticRegression() # The StackingCVClassifier uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed need to be specified explicitely for # deterministic behavior np.random.seed(RANDOM_SEED) sclf = StackingCVClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=lr) print('3-fold cross validation:\\n') for clf, label in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier']): scores = model_selection.cross_val_score(clf, X, y, cv=3, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 3-fold cross validation: Accuracy: 0.91 (+/- 0.01) [KNN] Accuracy: 0.90 (+/- 0.03) [Random Forest] Accuracy: 0.92 (+/- 0.03) [Naive Bayes] Accuracy: 0.93 (+/- 0.02) [StackingClassifier] import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions import matplotlib.gridspec as gridspec import itertools gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) for clf, lab, grd in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingCVClassifier'], itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf) plt.title(lab) plt.show() Example 2 - Using Probabilities as Meta-Features Alternatively, the class-probabilities of the first-level classifiers can be used to train the meta-classifier (2nd-level classifier) by setting use_probas=True . For example, in a 3-class setting with 2 level-1 classifiers, these classifiers may make the following \"probability\" predictions for 1 training sample: classifier 1: [0.2, 0.5, 0.3] classifier 2: [0.3, 0.4, 0.4] This results in k features, where k = [n_classes * n_classifiers], by stacking these level-1 probabilities: [0.2, 0.5, 0.3, 0.3, 0.4, 0.4] clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() # The StackingCVClassifier uses scikit-learn's check_cv # internally, which doesn't support a random seed. 
Thus # NumPy's random seed need to be specified explicitely for # deterministic behavior np.random.seed(RANDOM_SEED) sclf = StackingCVClassifier(classifiers=[clf1, clf2, clf3], use_probas=True, meta_classifier=lr) print('3-fold cross validation:\\n') for clf, label in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier']): scores = model_selection.cross_val_score(clf, X, y, cv=3, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 3-fold cross validation: Accuracy: 0.91 (+/- 0.01) [KNN] Accuracy: 0.91 (+/- 0.06) [Random Forest] Accuracy: 0.92 (+/- 0.03) [Naive Bayes] Accuracy: 0.95 (+/- 0.04) [StackingClassifier] Example 3 - Stacked CV Classification and GridSearch To set up a parameter grid for scikit-learn's GridSearch , we simply provide the estimator's names in the parameter grid -- in the special case of the meta-regressor, we append the 'meta-' prefix. from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV from mlxtend.classifier import StackingCVClassifier # Initializing models clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=RANDOM_SEED) clf3 = GaussianNB() lr = LogisticRegression() # The StackingCVClassifier uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed need to be specified explicitely for # deterministic behavior np.random.seed(RANDOM_SEED) sclf = StackingCVClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=lr) params = {'kneighborsclassifier__n_neighbors': [1, 5], 'randomforestclassifier__n_estimators': [10, 50], 'meta-logisticregression__C': [0.1, 10.0]} grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, refit=True) grid.fit(X, y) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.673 +/- 0.01 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.920 +/- 0.02 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.893 +/- 0.02 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.947 +/- 0.02 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.947 +/- 0.02 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Best parameters: {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} Accuracy: 0.95 In 
case we are planning to use a regression algorithm multiple times, all we need to do is to add an additional number suffix in the parameter grid as shown below: from sklearn.model_selection import GridSearchCV # Initializing models clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=RANDOM_SEED) clf3 = GaussianNB() lr = LogisticRegression() # The StackingCVClassifier uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed need to be specified explicitely for # deterministic behavior np.random.seed(RANDOM_SEED) sclf = StackingCVClassifier(classifiers=[clf1, clf1, clf2, clf3], meta_classifier=lr) params = {'kneighborsclassifier-1__n_neighbors': [1, 5], 'kneighborsclassifier-2__n_neighbors': [1, 5], 'randomforestclassifier__n_estimators': [10, 50], 'meta-logisticregression__C': [0.1, 10.0]} grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, refit=True) grid.fit(X, y) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.673 +/- 0.01 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.920 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.893 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.947 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.940 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.953 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.927 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 
'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.940 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.940 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Best parameters: {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} Accuracy: 0.95 Note The StackingCVClassifier also enables grid search over the classifiers argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both different classifiers and classifier parameters at the same time. For instance, while the following parameter dictionary works params = {'randomforestclassifier__n_estimators': [1, 100], 'classifiers': [(clf1, clf1, clf1), (clf2, clf3)]} it will use the instance settings of clf1 , clf2 , and clf3 and not overwrite them with the 'n_estimators' settings from 'randomforestclassifier__n_estimators': [1, 100] . Example 4 - Stacking of Classifiers that Operate on Different Feature Subsets The different level-1 classifiers can be fit to different subsets of features in the training dataset. The following example illustrates how this can be done on a technical level using scikit-learn pipelines and the ColumnSelector : from sklearn.datasets import load_iris from mlxtend.classifier import StackingCVClassifier from mlxtend.feature_selection import ColumnSelector from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression iris = load_iris() X = iris.data y = iris.target pipe1 = make_pipeline(ColumnSelector(cols=(0, 2)), LogisticRegression()) pipe2 = make_pipeline(ColumnSelector(cols=(1, 2, 3)), LogisticRegression()) sclf = StackingCVClassifier(classifiers=[pipe1, pipe2], meta_classifier=LogisticRegression()) sclf.fit(X, y) StackingCVClassifier(classifiers=[Pipeline(steps=[('columnselector', ColumnSelector(cols=(0, 2))), ('logisticregression', LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solve...='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False))])], cv=2, meta_classifier=LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False), shuffle=True, stratify=True, use_features_in_secondary=False, use_probas=False, verbose=0) API StackingCVClassifier(classifiers, meta_classifier, use_probas=False, cv=2, use_features_in_secondary=False, stratify=True, shuffle=True, verbose=0, store_train_meta_features=False, use_clones=True) A 'Stacking Cross-Validation' classifier for scikit-learn estimators. New in mlxtend v0.4.3 Notes The StackingCVClassifier uses scikit-learn's check_cv internally, which doesn't support a random seed.
Thus, NumPy's random seed needs to be specified explicitly for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVClassifier . Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the StackingCVClassifier will fit clones of these original classifiers that will be stored in the class attribute self.clfs_ . meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers. use_probas : bool (default: False) If True, trains the meta-classifier based on predicted probabilities instead of class labels. cv : int, cross-validation generator or an iterable, optional (default: 2) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 2-fold cross validation, - integer, to specify the number of folds in a (Stratified)KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use either a KFold or StratifiedKFold cross validation depending on the value of the stratify argument. use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. stratify : bool (default: True) If True, and the cv argument is an integer, it will follow a stratified K-Fold cross validation technique. If the cv argument is a specific cross validation technique, this argument is omitted. shuffle : bool (default: True) If True, and the cv argument is an integer, the training data will be shuffled at the fitting stage prior to cross-validation. If the cv argument is a specific cross validation technique, this argument is omitted. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the classifier being fitted and which fold is currently being used for fitting - verbose=2 : Prints info about the parameters of the classifier being fitted - verbose>2 : Changes the verbose param of the underlying classifier to self.verbose - 2 store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier are stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingCVClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features_ : numpy array, shape = [n_samples, n_classifiers] Meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classifiers.
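As a brief illustration of store_train_meta_features (a minimal sketch reusing the clf1 , clf2 , clf3 , lr , X , and y objects from the examples above; the printed shape simply follows from the attribute description):

sclf = StackingCVClassifier(classifiers=[clf1, clf2, clf3],
                            meta_classifier=lr,
                            store_train_meta_features=True)
sclf.fit(X, y)
# meta-features the meta-classifier was fit on, shape (n_samples, n_classifiers)
print(sclf.train_meta_features_.shape)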
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingCVClassifier/ Methods fit(X, y, groups=None, sample_weight=None) Fit ensemble classifiers and the meta-classifier. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each classifier in the classifiers list as well as the meta_classifier. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] Predicted class labels. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"StackingCVClassifier"},{"location":"user_guide/classifier/StackingCVClassifier/#stackingcvclassifier","text":"An ensemble-learning meta-classifier for stacking using cross-validation to prepare the inputs for the level-2 classifier to prevent overfitting.
from mlxtend.classifier import StackingCVClassifier","title":"StackingCVClassifier"},{"location":"user_guide/classifier/StackingCVClassifier/#overview","text":"Stacking is an ensemble learning technique to combine multiple classification models via a meta-classifier. The StackingCVClassifier extends the standard stacking algorithm (implemented as StackingClassifier ) using cross-validation to prepare the input data for the level-2 classifier. In the standard stacking procedure, the first-level classifiers are fit to the same training set that is used prepare the inputs for the second-level classifier, which may lead to overfitting. The StackingCVClassifier , however, uses the concept of cross-validation: the dataset is split into k folds, and in k successive rounds, k-1 folds are used to fit the first level classifier; in each round, the first-level classifiers are then applied to the remaining 1 subset that was not used for model fitting in each iteration. The resulting predictions are then stacked and provided -- as input data -- to the second-level classifier. After the training of the StackingCVClassifier , the first-level classifiers are fit to the entire dataset as illustrated in the figure below. More formally, the Stacking Cross-Validation algorithm can be summarized as follows (source: [1]):","title":"Overview"},{"location":"user_guide/classifier/StackingCVClassifier/#references","text":"[1] Tang, J., S. Alelyani, and H. Liu. \" Data Classification: Algorithms and Applications. \" Data Mining and Knowledge Discovery Series, CRC Press (2015): pp. 498-500. [2] Wolpert, David H. \" Stacked generalization. \" Neural networks 5.2 (1992): 241-259.","title":"References"},{"location":"user_guide/classifier/StackingCVClassifier/#example-1-simple-stacking-cv-classification","text":"from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target from sklearn import model_selection from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import StackingCVClassifier import numpy as np RANDOM_SEED = 42 clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=RANDOM_SEED) clf3 = GaussianNB() lr = LogisticRegression() # The StackingCVClassifier uses scikit-learn's check_cv # internally, which doesn't support a random seed. 
Thus # NumPy's random seed need to be specified explicitely for # deterministic behavior np.random.seed(RANDOM_SEED) sclf = StackingCVClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=lr) print('3-fold cross validation:\\n') for clf, label in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier']): scores = model_selection.cross_val_score(clf, X, y, cv=3, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 3-fold cross validation: Accuracy: 0.91 (+/- 0.01) [KNN] Accuracy: 0.90 (+/- 0.03) [Random Forest] Accuracy: 0.92 (+/- 0.03) [Naive Bayes] Accuracy: 0.93 (+/- 0.02) [StackingClassifier] import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions import matplotlib.gridspec as gridspec import itertools gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) for clf, lab, grd in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingCVClassifier'], itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf) plt.title(lab) plt.show()","title":"Example 1 - Simple Stacking CV Classification"},{"location":"user_guide/classifier/StackingCVClassifier/#example-2-using-probabilities-as-meta-features","text":"Alternatively, the class-probabilities of the first-level classifiers can be used to train the meta-classifier (2nd-level classifier) by setting use_probas=True . For example, in a 3-class setting with 2 level-1 classifiers, these classifiers may make the following \"probability\" predictions for 1 training sample: classifier 1: [0.2, 0.5, 0.3] classifier 2: [0.3, 0.4, 0.4] This results in k features, where k = [n_classes * n_classifiers], by stacking these level-1 probabilities: [0.2, 0.5, 0.3, 0.3, 0.4, 0.4] clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() # The StackingCVClassifier uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed need to be specified explicitely for # deterministic behavior np.random.seed(RANDOM_SEED) sclf = StackingCVClassifier(classifiers=[clf1, clf2, clf3], use_probas=True, meta_classifier=lr) print('3-fold cross validation:\\n') for clf, label in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier']): scores = model_selection.cross_val_score(clf, X, y, cv=3, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 3-fold cross validation: Accuracy: 0.91 (+/- 0.01) [KNN] Accuracy: 0.91 (+/- 0.06) [Random Forest] Accuracy: 0.92 (+/- 0.03) [Naive Bayes] Accuracy: 0.95 (+/- 0.04) [StackingClassifier]","title":"Example 2 - Using Probabilities as Meta-Features"},{"location":"user_guide/classifier/StackingCVClassifier/#example-3-stacked-cv-classification-and-gridsearch","text":"To set up a parameter grid for scikit-learn's GridSearch , we simply provide the estimator's names in the parameter grid -- in the special case of the meta-regressor, we append the 'meta-' prefix. 
from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV from mlxtend.classifier import StackingCVClassifier # Initializing models clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=RANDOM_SEED) clf3 = GaussianNB() lr = LogisticRegression() # The StackingCVClassifier uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed need to be specified explicitely for # deterministic behavior np.random.seed(RANDOM_SEED) sclf = StackingCVClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=lr) params = {'kneighborsclassifier__n_neighbors': [1, 5], 'randomforestclassifier__n_estimators': [10, 50], 'meta-logisticregression__C': [0.1, 10.0]} grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, refit=True) grid.fit(X, y) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.673 +/- 0.01 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.920 +/- 0.02 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.893 +/- 0.02 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.947 +/- 0.02 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.947 +/- 0.02 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Best parameters: {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} Accuracy: 0.95 In case we are planning to use a regression algorithm multiple times, all we need to do is to add an additional number suffix in the parameter grid as shown below: from sklearn.model_selection import GridSearchCV # Initializing models clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=RANDOM_SEED) clf3 = GaussianNB() lr = LogisticRegression() # The StackingCVClassifier uses scikit-learn's check_cv # internally, which doesn't support a random seed. 
Thus # NumPy's random seed need to be specified explicitely for # deterministic behavior np.random.seed(RANDOM_SEED) sclf = StackingCVClassifier(classifiers=[clf1, clf1, clf2, clf3], meta_classifier=lr) params = {'kneighborsclassifier-1__n_neighbors': [1, 5], 'kneighborsclassifier-2__n_neighbors': [1, 5], 'randomforestclassifier__n_estimators': [10, 50], 'meta-logisticregression__C': [0.1, 10.0]} grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, refit=True) grid.fit(X, y) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.673 +/- 0.01 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.920 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.893 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.947 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.940 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.953 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.927 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.940 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.940 +/- 0.03 
{'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Best parameters: {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} Accuracy: 0.95 Note The StackingCVClassifier also enables grid search over the classifiers argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both, differenct classifiers and classifier parameters at the same time. For instance, while the following parameter dictionary works params = {'randomforestclassifier__n_estimators': [1, 100], 'classifiers': [(clf1, clf1, clf1), (clf2, clf3)]} it will use the instance settings of clf1 , clf2 , and clf3 and not overwrite it with the 'n_estimators' settings from 'randomforestclassifier__n_estimators': [1, 100] .","title":"Example 3 - Stacked CV Classification and GridSearch"},{"location":"user_guide/classifier/StackingCVClassifier/#example-4-stacking-of-classifiers-that-operate-on-different-feature-subsets","text":"The different level-1 classifiers can be fit to different subsets of features in the training dataset. The following example illustrates how this can be done on a technical level using scikit-learn pipelines and the ColumnSelector : from sklearn.datasets import load_iris from mlxtend.classifier import StackingCVClassifier from mlxtend.feature_selection import ColumnSelector from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression iris = load_iris() X = iris.data y = iris.target pipe1 = make_pipeline(ColumnSelector(cols=(0, 2)), LogisticRegression()) pipe2 = make_pipeline(ColumnSelector(cols=(1, 2, 3)), LogisticRegression()) sclf = StackingCVClassifier(classifiers=[pipe1, pipe2], meta_classifier=LogisticRegression()) sclf.fit(X, y) StackingCVClassifier(classifiers=[Pipeline(steps=[('columnselector', ColumnSelector(cols=(0, 2))), ('logisticregression', LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solve...='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False))])], cv=2, meta_classifier=LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False), shuffle=True, stratify=True, use_features_in_secondary=False, use_probas=False, verbose=0)","title":"Example 4 - Stacking of Classifiers that Operate on Different Feature Subsets"},{"location":"user_guide/classifier/StackingCVClassifier/#api","text":"StackingCVClassifier(classifiers, meta_classifier, use_probas=False, cv=2, use_features_in_secondary=False, stratify=True, shuffle=True, verbose=0, store_train_meta_features=False, use_clones=True) A 'Stacking Cross-Validation' classifier for scikit-learn estimators. New in mlxtend v0.4.3 Notes The StackingCVClassifier uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus NumPy's random seed need to be specified explicitely for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVClassifier Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. 
Invoking the fit method on the StackingCVClassifer will fit clones of these original classifiers that will be stored in the class attribute self.clfs_ . meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains meta-classifier based on predicted probabilities instead of class labels. cv : int, cross-validation generator or an iterable, optional (default: 2) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 2-fold cross validation, - integer, to specify the number of folds in a (Stratified)KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use either a KFold or StratifiedKFold cross validation depending the value of stratify argument. use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. stratify : bool (default: True) If True, and the cv argument is integer it will follow a stratified K-Fold cross validation technique. If the cv argument is a specific cross validation technique, this argument is omitted. shuffle : bool (default: True) If True, and the cv argument is integer, the training data will be shuffled at fitting stage prior to cross-validation. If the cv argument is a specific cross validation technique, this argument is omitted. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the regressor being fitted and which fold is currently being used for fitting - verbose=2 : Prints info about the parameters of the regressor being fitted - verbose>2 : Changes verbose param of the underlying regressor to self.verbose - 2 store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingCVClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that are supporting the scikit-learn fit/predict API interface but are not compatible to scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classfiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingCVClassifier/","title":"API"},{"location":"user_guide/classifier/StackingCVClassifier/#methods","text":"fit(X, y, groups=None, sample_weight=None) Fit ensemble classifers and the meta-classifier. 
Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] Predicted class labels. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self","title":"Methods"},{"location":"user_guide/classifier/StackingClassifier/","text":"StackingClassifier An ensemble-learning meta-classifier for stacking. from mlxtend.classifier import StackingClassifier Overview Stacking is an ensemble learning technique to combine multiple classification models via a meta-classifier. The individual classification models are trained based on the complete training set; then, the meta-classifier is fitted based on the outputs -- meta-features -- of the individual classification models in the ensemble. The meta-classifier can either be trained on the predicted class labels or probabilities from the ensemble. 
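To make the procedure concrete, here is a rough sketch of label-based stacking (conceptual only, not the StackingClassifier implementation; the two estimators and the iris data are placeholder choices):

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier

X, y = load_iris(return_X_y=True)

# 1) fit the level-1 classifiers on the complete training set
level1 = [KNeighborsClassifier(n_neighbors=1), GaussianNB()]
for clf in level1:
    clf.fit(X, y)

# 2) their predictions on that same training set become the meta-features
meta_features = np.column_stack([clf.predict(X) for clf in level1])

# 3) the meta-classifier is fitted on the meta-features
meta_clf = LogisticRegression().fit(meta_features, y)

Because the meta-features are derived from the same data the level-1 classifiers were trained on, this is exactly where the information leakage mentioned in the note below can occur.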
The algorithm can be summarized as follows (source: [1]): Please note that this type of Stacking is prone to overfitting due to information leakage. The related StackingCVClassifier does not derive the predictions for the 2nd-level classifier from the same dataset that was used for training the level-1 classifiers and is recommended instead. References [1] Tang, J., S. Alelyani, and H. Liu. \" Data Classification: Algorithms and Applications. \" Data Mining and Knowledge Discovery Series, CRC Press (2015): pp. 498-500. [2] Wolpert, David H. \" Stacked generalization. \" Neural networks 5.2 (1992): 241-259. Example 1 - Simple Stacked Classification from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target from sklearn import model_selection from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import StackingClassifier import numpy as np clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() sclf = StackingClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=lr) print('3-fold cross validation:\\n') for clf, label in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier']): scores = model_selection.cross_val_score(clf, X, y, cv=3, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 3-fold cross validation: Accuracy: 0.91 (+/- 0.01) [KNN] Accuracy: 0.91 (+/- 0.06) [Random Forest] Accuracy: 0.92 (+/- 0.03) [Naive Bayes] Accuracy: 0.95 (+/- 0.03) [StackingClassifier] import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions import matplotlib.gridspec as gridspec import itertools gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) for clf, lab, grd in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier'], itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf) plt.title(lab) Example 2 - Using Probabilities as Meta-Features Alternatively, the class-probabilities of the first-level classifiers can be used to train the meta-classifier (2nd-level classifier) by setting use_probas=True . If average_probas=True , the probabilities of the level-1 classifiers are averaged; if average_probas=False , the probabilities are stacked (recommended).
For example, in a 3-class setting with 2 level-1 classifiers, these classifiers may make the following \"probability\" predictions for 1 training sample: classifier 1: [0.2, 0.5, 0.3] classifier 2: [0.3, 0.4, 0.4] If average_probas=True , the meta-features would be: [0.25, 0.45, 0.35] In contrast, using average_probas=False results in k features where, k = [n_classes * n_classifiers], by stacking these level-1 probabilities: [0.2, 0.5, 0.3, 0.3, 0.4, 0.4] clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() sclf = StackingClassifier(classifiers=[clf1, clf2, clf3], use_probas=True, average_probas=False, meta_classifier=lr) print('3-fold cross validation:\\n') for clf, label in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier']): scores = model_selection.cross_val_score(clf, X, y, cv=3, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 3-fold cross validation: Accuracy: 0.91 (+/- 0.01) [KNN] Accuracy: 0.91 (+/- 0.06) [Random Forest] Accuracy: 0.92 (+/- 0.03) [Naive Bayes] Accuracy: 0.94 (+/- 0.03) [StackingClassifier] Example 3 - Stacked Classification and GridSearch To set up a parameter grid for scikit-learn's GridSearch , we simply provide the estimator's names in the parameter grid -- in the special case of the meta-regressor, we append the 'meta-' prefix. from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV from mlxtend.classifier import StackingClassifier # Initializing models clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() sclf = StackingClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=lr) params = {'kneighborsclassifier__n_neighbors': [1, 5], 'randomforestclassifier__n_estimators': [10, 50], 'meta-logisticregression__C': [0.1, 10.0]} grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, refit=True) grid.fit(X, y) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.927 +/- 0.02 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.913 +/- 0.03 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.933 +/- 0.02 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.940 +/- 0.02 
{'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Best parameters: {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Accuracy: 0.94 In case we are planning to use a regression algorithm multiple times, all we need to do is to add an additional number suffix in the parameter grid as shown below: from sklearn.model_selection import GridSearchCV # Initializing models clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() sclf = StackingClassifier(classifiers=[clf1, clf1, clf2, clf3], meta_classifier=lr) params = {'kneighborsclassifier-1__n_neighbors': [1, 5], 'kneighborsclassifier-2__n_neighbors': [1, 5], 'randomforestclassifier__n_estimators': [10, 50], 'meta-logisticregression__C': [0.1, 10.0]} grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, refit=True) grid.fit(X, y) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.907 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.913 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.927 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.913 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.927 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.913 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 
{'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.933 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.940 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Best parameters: {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Accuracy: 0.94 Note The StackingClassifier also enables grid search over the classifiers argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both different classifiers and classifier parameters at the same time. For instance, while the following parameter dictionary works params = {'randomforestclassifier__n_estimators': [1, 100], 'classifiers': [(clf1, clf1, clf1), (clf2, clf3)]} it will use the instance settings of clf1 , clf2 , and clf3 and will not overwrite them with the 'n_estimators' settings from 'randomforestclassifier__n_estimators': [1, 100] . Example 4 - Stacking of Classifiers that Operate on Different Feature Subsets The different level-1 classifiers can be fit to different subsets of features in the training dataset. The following example illustrates how this can be done on a technical level using scikit-learn pipelines and the ColumnSelector : from sklearn.datasets import load_iris from mlxtend.classifier import StackingClassifier from mlxtend.feature_selection import ColumnSelector from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression iris = load_iris() X = iris.data y = iris.target pipe1 = make_pipeline(ColumnSelector(cols=(0, 2)), LogisticRegression()) pipe2 = make_pipeline(ColumnSelector(cols=(1, 2, 3)), LogisticRegression()) sclf = StackingClassifier(classifiers=[pipe1, pipe2], meta_classifier=LogisticRegression()) sclf.fit(X, y) StackingClassifier(average_probas=False, classifiers=[Pipeline(steps=[('columnselector', ColumnSelector(cols=(0, 2))), ('logisticregression', LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solve...='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False))])], meta_classifier=LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False), use_features_in_secondary=False, use_probas=False, verbose=0) API StackingClassifier(classifiers, meta_classifier, use_probas=False, average_probas=False, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, use_clones=True) A Stacking classifier for scikit-learn estimators for classification. Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. 
Invoking the fit method on the StackingClassifier will fit clones of these original classifiers that will be stored in the class attribute self.clfs_ . meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains the meta-classifier based on predicted probabilities instead of class labels. average_probas : bool (default: False) Averages the probabilities as meta-features if True. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the classifier being fitted - verbose=2 : Prints info about the parameters of the classifier being fitted - verbose>2 : Changes verbose param of the underlying classifier to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier are stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingClassifier's fit method. Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features_ : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classifiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingClassifier/ Methods fit(X, y, sample_weight=None) Fit ensemble classifiers and the meta-classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_outputs] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each classifier in the classifiers list as well as the meta_classifier. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. 
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] or [n_samples, n_outputs] Predicted class labels. predict_meta_features(X) Get meta-features of test data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] or a list of n_outputs of such arrays if n_outputs > 1. Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"StackingClassifier"},{"location":"user_guide/classifier/StackingClassifier/#stackingclassifier","text":"An ensemble-learning meta-classifier for stacking. from mlxtend.classifier import StackingClassifier","title":"StackingClassifier"},{"location":"user_guide/classifier/StackingClassifier/#overview","text":"Stacking is an ensemble learning technique to combine multiple classification models via a meta-classifier. The individual classification models are trained based on the complete training set; then, the meta-classifier is fitted based on the outputs -- meta-features -- of the individual classification models in the ensemble. The meta-classifier can either be trained on the predicted class labels or probabilities from the ensemble. The algorithm can be summarized as follows (source: [1]): Please note that this type of Stacking is prone to overfitting due to information leakage. The related StackingCVClassifier does not derive the predictions for the 2nd-level classifier from the same dataset that was used for training the level-1 classifiers and is recommended instead.","title":"Overview"},{"location":"user_guide/classifier/StackingClassifier/#references","text":"[1] Tang, J., S. Alelyani, and H. Liu. \" Data Classification: Algorithms and Applications. \" Data Mining and Knowledge Discovery Series, CRC Press (2015): pp. 498-500. [2] Wolpert, David H. \" Stacked generalization. 
\" Neural networks 5.2 (1992): 241-259.","title":"References"},{"location":"user_guide/classifier/StackingClassifier/#example-1-simple-stacked-classification","text":"from sklearn import datasets iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target from sklearn import model_selection from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from mlxtend.classifier import StackingClassifier import numpy as np clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() sclf = StackingClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=lr) print('3-fold cross validation:\\n') for clf, label in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier']): scores = model_selection.cross_val_score(clf, X, y, cv=3, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 3-fold cross validation: Accuracy: 0.91 (+/- 0.01) [KNN] Accuracy: 0.91 (+/- 0.06) [Random Forest] Accuracy: 0.92 (+/- 0.03) [Naive Bayes] Accuracy: 0.95 (+/- 0.03) [StackingClassifier] import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions import matplotlib.gridspec as gridspec import itertools gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) for clf, lab, grd in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier'], itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf) plt.title(lab)","title":"Example 1 - Simple Stacked Classification"},{"location":"user_guide/classifier/StackingClassifier/#example-2-using-probabilities-as-meta-features","text":"Alternatively, the class-probabilities of the first-level classifiers can be used to train the meta-classifier (2nd-level classifier) by setting use_probas=True . If average_probas=True , the probabilities of the level-1 classifiers are averaged, if average_probas=False , the probabilities are stacked (recommended). 
For example, in a 3-class setting with 2 level-1 classifiers, these classifiers may make the following \"probability\" predictions for 1 training sample: classifier 1: [0.2, 0.5, 0.3] classifier 2: [0.3, 0.4, 0.4] If average_probas=True , the meta-features would be: [0.25, 0.45, 0.35] In contrast, using average_probas=False results in k features, where k = n_classes * n_classifiers, obtained by stacking these level-1 probabilities: [0.2, 0.5, 0.3, 0.3, 0.4, 0.4] clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() sclf = StackingClassifier(classifiers=[clf1, clf2, clf3], use_probas=True, average_probas=False, meta_classifier=lr) print('3-fold cross validation:\\n') for clf, label in zip([clf1, clf2, clf3, sclf], ['KNN', 'Random Forest', 'Naive Bayes', 'StackingClassifier']): scores = model_selection.cross_val_score(clf, X, y, cv=3, scoring='accuracy') print(\"Accuracy: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label)) 3-fold cross validation: Accuracy: 0.91 (+/- 0.01) [KNN] Accuracy: 0.91 (+/- 0.06) [Random Forest] Accuracy: 0.92 (+/- 0.03) [Naive Bayes] Accuracy: 0.94 (+/- 0.03) [StackingClassifier]","title":"Example 2 - Using Probabilities as Meta-Features"},{"location":"user_guide/classifier/StackingClassifier/#example-3-stacked-classification-and-gridsearch","text":"To set up a parameter grid for scikit-learn's GridSearch , we simply provide the estimators' names in the parameter grid -- in the special case of the meta-classifier, we append the 'meta-' prefix. from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV from mlxtend.classifier import StackingClassifier # Initializing models clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() sclf = StackingClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=lr) params = {'kneighborsclassifier__n_neighbors': [1, 5], 'randomforestclassifier__n_estimators': [10, 50], 'meta-logisticregression__C': [0.1, 10.0]} grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, refit=True) grid.fit(X, y) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.927 +/- 0.02 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.913 +/- 0.03 {'kneighborsclassifier__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.933 +/- 0.02 
{'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.940 +/- 0.02 {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Best parameters: {'kneighborsclassifier__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Accuracy: 0.94 In case we are planning to use a classification algorithm multiple times, all we need to do is add an additional number suffix in the parameter grid as shown below: from sklearn.model_selection import GridSearchCV # Initializing models clf1 = KNeighborsClassifier(n_neighbors=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() lr = LogisticRegression() sclf = StackingClassifier(classifiers=[clf1, clf1, clf2, clf3], meta_classifier=lr) params = {'kneighborsclassifier-1__n_neighbors': [1, 5], 'kneighborsclassifier-2__n_neighbors': [1, 5], 'randomforestclassifier__n_estimators': [10, 50], 'meta-logisticregression__C': [0.1, 10.0]} grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, refit=True) grid.fit(X, y) cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) print('Best parameters: %s' % grid.best_params_) print('Accuracy: %.2f' % grid.best_score_) 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.907 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.913 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.927 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.913 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 1, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.927 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.913 +/- 0.03 {'kneighborsclassifier-1__n_neighbors': 5, 
'kneighborsclassifier-2__n_neighbors': 1, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 10} 0.667 +/- 0.00 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 0.1, 'randomforestclassifier__n_estimators': 50} 0.933 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 10} 0.940 +/- 0.02 {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Best parameters: {'kneighborsclassifier-1__n_neighbors': 5, 'kneighborsclassifier-2__n_neighbors': 5, 'meta-logisticregression__C': 10.0, 'randomforestclassifier__n_estimators': 50} Accuracy: 0.94 Note The StackingClassifier also enables grid search over the classifiers argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both different classifiers and classifier parameters at the same time. For instance, while the following parameter dictionary works params = {'randomforestclassifier__n_estimators': [1, 100], 'classifiers': [(clf1, clf1, clf1), (clf2, clf3)]} it will use the instance settings of clf1 , clf2 , and clf3 and will not overwrite them with the 'n_estimators' settings from 'randomforestclassifier__n_estimators': [1, 100] .","title":"Example 3 - Stacked Classification and GridSearch"},{"location":"user_guide/classifier/StackingClassifier/#example-4-stacking-of-classifiers-that-operate-on-different-feature-subsets","text":"The different level-1 classifiers can be fit to different subsets of features in the training dataset. 
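Under the hood, the ColumnSelector used in the pipeline example below does little more than slice the requested columns out of the feature matrix. A minimal sketch of the idea (the toy matrix and column indices here are chosen for illustration only):

import numpy as np
from mlxtend.feature_selection import ColumnSelector

X_toy = np.arange(12).reshape(3, 4)    # toy feature matrix with 4 columns
col_sel = ColumnSelector(cols=(0, 2))  # keep only columns 0 and 2
print(col_sel.transform(X_toy))        # roughly equivalent to X_toy[:, (0, 2)]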
The following example illustrates how this can be done on a technical level using scikit-learn pipelines and the ColumnSelector : from sklearn.datasets import load_iris from mlxtend.classifier import StackingClassifier from mlxtend.feature_selection import ColumnSelector from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression iris = load_iris() X = iris.data y = iris.target pipe1 = make_pipeline(ColumnSelector(cols=(0, 2)), LogisticRegression()) pipe2 = make_pipeline(ColumnSelector(cols=(1, 2, 3)), LogisticRegression()) sclf = StackingClassifier(classifiers=[pipe1, pipe2], meta_classifier=LogisticRegression()) sclf.fit(X, y) StackingClassifier(average_probas=False, classifiers=[Pipeline(steps=[('columnselector', ColumnSelector(cols=(0, 2))), ('logisticregression', LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solve...='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False))])], meta_classifier=LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solver='liblinear', tol=0.0001, verbose=0, warm_start=False), use_features_in_secondary=False, use_probas=False, verbose=0)","title":"Example 4 - Stacking of Classifiers that Operate on Different Feature Subsets"},{"location":"user_guide/classifier/StackingClassifier/#api","text":"StackingClassifier(classifiers, meta_classifier, use_probas=False, average_probas=False, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, use_clones=True) A Stacking classifier for scikit-learn estimators for classification. Parameters classifiers : array-like, shape = [n_classifiers] A list of classifiers. Invoking the fit method on the StackingClassifier will fit clones of these original classifiers that will be stored in the class attribute self.clfs_ . meta_classifier : object The meta-classifier to be fitted on the ensemble of classifiers use_probas : bool (default: False) If True, trains the meta-classifier based on predicted probabilities instead of class labels. average_probas : bool (default: False) Averages the probabilities as meta-features if True. verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the classifier being fitted - verbose=2 : Prints info about the parameters of the classifier being fitted - verbose>2 : Changes verbose param of the underlying classifier to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-classifier will be trained both on the predictions of the original classifiers and the original dataset. If False, the meta-classifier will be trained only on the predictions of the original classifiers. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-classifier are stored in the self.train_meta_features_ array, which can be accessed after calling fit . use_clones : bool (default: True) Clones the classifiers for stacking classification if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Hence, if use_clones=True, the original input classifiers will remain unmodified upon using the StackingClassifier's fit method. 
Setting use_clones=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Attributes clfs_ : list, shape=[n_classifiers] Fitted classifiers (clones of the original classifiers) meta_clf_ : estimator Fitted meta-classifier (clone of the original meta-estimator) train_meta_features_ : numpy array, shape = [n_samples, n_classifiers] meta-features for training data, where n_samples is the number of samples in training data and n_classifiers is the number of classifiers. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/classifier/StackingClassifier/","title":"API"},{"location":"user_guide/classifier/StackingClassifier/#methods","text":"fit(X, y, sample_weight=None) Fit ensemble classifiers and the meta-classifier. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_outputs] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each classifier in the classifiers list as well as the meta_classifier. Raises an error if some classifier does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns labels : array-like, shape = [n_samples] or [n_samples, n_outputs] Predicted class labels. predict_meta_features(X) Get meta-features of test data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, n_classifiers] Returns the meta-features for test data. predict_proba(X) Predict class probabilities for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns proba : array-like, shape = [n_samples, n_classes] or a list of n_outputs of such arrays if n_outputs > 1. Probability for each class per sample. score(X, y, sample_weight=None) Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float Mean accuracy of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. 
The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"Methods"},{"location":"user_guide/cluster/Kmeans/","text":"Kmeans An implementation of k-means clustering. from mlxtend.cluster import Kmeans Overview Clustering falls into the category of unsupervised learning, a subfield of machine learning where the ground truth labels are not available to us in real-world applications. In clustering, our goal is to group samples by similarity (in k-means: Euclidean distance). The k-means algorithm can be summarized as follows: Randomly pick k centroids from the sample points as initial cluster centers. Assign each sample to the nearest centroid \mu(j), \; j \in {1,...,k} . Move the centroids to the center of the samples that were assigned to them. Repeat steps 2 and 3 until the cluster assignments do not change or a user-defined tolerance or a maximum number of iterations is reached. References MacQueen, J. B. (1967). Some Methods for classification and Analysis of Multivariate Observations . Proceedings of 5th Berkeley Symposium on Mathematical Statistics and Probability. University of California Press. pp. 281\u2013297. MR 0214227. Zbl 0214.46201. Retrieved 2009-04-07. Example 1 - Three Blobs Load some sample data: import matplotlib.pyplot as plt from mlxtend.data import three_blobs_data X, y = three_blobs_data() plt.scatter(X[:, 0], X[:, 1], c='white') plt.show() Compute the cluster centroids: from mlxtend.cluster import Kmeans km = Kmeans(k=3, max_iter=50, random_seed=1, print_progress=3) km.fit(X) print('Iterations until convergence:', km.iterations_) print('Final centroids:\\n', km.centroids_) Iteration: 2/50 | Elapsed: 00:00:00 | ETA: 00:00:00 Iterations until convergence: 2 Final centroids: [[-1.5947298 2.92236966] [ 2.06521743 0.96137409] [ 0.9329651 4.35420713]] Visualize the cluster memberships: y_clust = km.predict(X) plt.scatter(X[y_clust == 0, 0], X[y_clust == 0, 1], s=50, c='lightgreen', marker='s', label='cluster 1') plt.scatter(X[y_clust == 1,0], X[y_clust == 1,1], s=50, c='orange', marker='o', label='cluster 2') plt.scatter(X[y_clust == 2,0], X[y_clust == 2,1], s=50, c='lightblue', marker='v', label='cluster 3') plt.scatter(km.centroids_[:,0], km.centroids_[:,1], s=250, marker='*', c='red', label='centroids') plt.legend(loc='lower left', scatterpoints=1) plt.grid() plt.show() API Kmeans(k, max_iter=10, convergence_tolerance=1e-05, random_seed=None, print_progress=0) K-means clustering class. Added in 0.4.1dev Parameters k : int Number of clusters max_iter : int (default: 10) Number of iterations during cluster assignment. Cluster re-assignment stops automatically when the algorithm has converged. convergence_tolerance : float (default: 1e-05) Compares current centroids with centroids of the previous iteration using the given tolerance (a small positive float) to determine if the algorithm converged early. random_seed : int (default: None) Set random state for the initial centroid assignment. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Iterations elapsed 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes centroids_ : 2d-array, shape={k, n_features} Feature values of the k cluster centroids. 
clusters_ : dictionary The cluster assignments stored as a Python dictionary; the dictionary keys denote the cluster indices and the items are Python lists of the sample indices that were assigned to each cluster. iterations_ : int Number of iterations until convergence. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/cluster/Kmeans/ Methods fit(X, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values.","title":"Kmeans"},{"location":"user_guide/cluster/Kmeans/#kmeans","text":"An implementation of k-means clustering. from mlxtend.cluster import Kmeans","title":"Kmeans"},{"location":"user_guide/cluster/Kmeans/#overview","text":"Clustering falls into the category of unsupervised learning, a subfield of machine learning where the ground truth labels are not available to us in real-world applications. In clustering, our goal is to group samples by similarity (in k-means: Euclidean distance). The k-means algorithm can be summarized as follows: Randomly pick k centroids from the sample points as initial cluster centers. Assign each sample to the nearest centroid \mu(j), \; j \in {1,...,k} . Move the centroids to the center of the samples that were assigned to them. Repeat steps 2 and 3 until the cluster assignments do not change or a user-defined tolerance or a maximum number of iterations is reached.","title":"Overview"},{"location":"user_guide/cluster/Kmeans/#references","text":"MacQueen, J. B. (1967). Some Methods for classification and Analysis of Multivariate Observations . Proceedings of 5th Berkeley Symposium on Mathematical Statistics and Probability. University of California Press. pp. 281\u2013297. MR 0214227. Zbl 0214.46201. 
Retrieved 2009-04-07.","title":"References"},{"location":"user_guide/cluster/Kmeans/#example-1-three-blobs","text":"","title":"Example 1 - Three Blobs"},{"location":"user_guide/cluster/Kmeans/#load-some-sample-data","text":"import matplotlib.pyplot as plt from mlxtend.data import three_blobs_data X, y = three_blobs_data() plt.scatter(X[:, 0], X[:, 1], c='white') plt.show()","title":"Load some sample data:"},{"location":"user_guide/cluster/Kmeans/#compute-the-cluster-centroids","text":"from mlxtend.cluster import Kmeans km = Kmeans(k=3, max_iter=50, random_seed=1, print_progress=3) km.fit(X) print('Iterations until convergence:', km.iterations_) print('Final centroids:\\n', km.centroids_) Iteration: 2/50 | Elapsed: 00:00:00 | ETA: 00:00:00 Iterations until convergence: 2 Final centroids: [[-1.5947298 2.92236966] [ 2.06521743 0.96137409] [ 0.9329651 4.35420713]]","title":"Compute the cluster centroids:"},{"location":"user_guide/cluster/Kmeans/#visualize-the-cluster-memberships","text":"y_clust = km.predict(X) plt.scatter(X[y_clust == 0, 0], X[y_clust == 0, 1], s=50, c='lightgreen', marker='s', label='cluster 1') plt.scatter(X[y_clust == 1,0], X[y_clust == 1,1], s=50, c='orange', marker='o', label='cluster 2') plt.scatter(X[y_clust == 2,0], X[y_clust == 2,1], s=50, c='lightblue', marker='v', label='cluster 3') plt.scatter(km.centroids_[:,0], km.centroids_[:,1], s=250, marker='*', c='red', label='centroids') plt.legend(loc='lower left', scatterpoints=1) plt.grid() plt.show()","title":"Visualize the cluster memberships:"},{"location":"user_guide/cluster/Kmeans/#api","text":"Kmeans(k, max_iter=10, convergence_tolerance=1e-05, random_seed=None, print_progress=0) K-means clustering class. Added in 0.4.1dev Parameters k : int Number of clusters max_iter : int (default: 10) Number of iterations during cluster assignment. Cluster re-assignment stops automatically when the algorithm has converged. convergence_tolerance : float (default: 1e-05) Compares current centroids with centroids of the previous iteration using the given tolerance (a small positive float) to determine if the algorithm converged early. random_seed : int (default: None) Set random state for the initial centroid assignment. print_progress : int (default: 0) Prints progress in fitting to stderr. 0: No output 1: Iterations elapsed 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes centroids_ : 2d-array, shape={k, n_features} Feature values of the k cluster centroids. clusters_ : dictionary The cluster assignments stored as a Python dictionary; the dictionary keys denote the cluster indices and the items are Python lists of the sample indices that were assigned to each cluster. iterations_ : int Number of iterations until convergence. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/cluster/Kmeans/","title":"API"},{"location":"user_guide/cluster/Kmeans/#methods","text":"fit(X, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object predict(X) Predict targets from X. 
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values.","title":"Methods"},{"location":"user_guide/data/autompg_data/","text":"Auto MPG A function that loads the autompg dataset into NumPy arrays. from mlxtend.data import autompg_data Overview The Auto-MPG dataset for regression analysis. The target ( y ) is defined as the miles per gallon (mpg) for 392 automobiles (6 rows containing \"NaN\"s have been removed). The 8 feature columns are: Features cylinders: multi-valued discrete displacement: continuous horsepower: continuous weight: continuous acceleration: continuous model year: multi-valued discrete origin: multi-valued discrete car name: string (unique for each instance) Number of samples: 392 Target variable (continuous): mpg References Source: https://archive.ics.uci.edu/ml/datasets/Auto+MPG Quinlan, R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann. Example - Dataset overview from mlxtend.data import autompg_data X, y = autompg_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('\\nHeader: %s' % ['cylinders', 'displacement', 'horsepower', 'weight', 'acceleration', 'model year', 'origin', 'car name']) print('1st row', X[0]) Dimensions: 392 x 8 Header: ['cylinders', 'displacement', 'horsepower', 'weight', 'acceleration', 'model year', 'origin', 'car name'] 1st row [ 8.00000000e+00 3.07000000e+02 1.30000000e+02 3.50400000e+03 1.20000000e+01 7.00000000e+01 1.00000000e+00 nan] Note that the feature array contains a str column (\"car name\"), thus it is recommended to pick the features as needed and convert them into a float array for further analysis. The example below shows how to get rid of the car name column and cast the NumPy array as a float array. X[:, :-1].astype(float) array([[ 8. , 307. , 130. , ..., 12. , 70. , 1. ], [ 8. , 350. , 165. , ..., 11.5, 70. , 1. ], [ 8. , 318. , 150. , ..., 11. , 70. , 1. ], ..., [ 4. , 135. , 84. , ..., 11.6, 82. , 1. ], [ 4. , 120. , 79. , ..., 18.6, 82. , 1. ], [ 4. , 119. , 82. , ..., 19.4, 82. , 1. ]]) API autompg_data() Auto MPG dataset. Source : https://archive.ics.uci.edu/ml/datasets/Auto+MPG Number of samples : 392 Continuous target variable : mpg Dataset Attributes: 1) cylinders: multi-valued discrete 2) displacement: continuous 3) horsepower: continuous 4) weight: continuous 5) acceleration: continuous 6) model year: multi-valued discrete 7) origin: multi-valued discrete 8) car name: string (unique for each instance) Returns X, y : [n_samples, n_features], [n_targets] X is the feature matrix with 392 auto samples as rows and 8 feature columns (6 rows with NaNs removed). y is a 1-dimensional array of the target MPG values. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/autompg_data/","title":"Auto MPG"},{"location":"user_guide/data/autompg_data/#auto-mpg","text":"A function that loads the autompg dataset into NumPy arrays. from mlxtend.data import autompg_data","title":"Auto MPG"},{"location":"user_guide/data/autompg_data/#overview","text":"The Auto-MPG dataset for regression analysis. The target ( y ) is defined as the miles per gallon (mpg) for 392 automobiles (6 rows containing \"NaN\"s have been removed). 
The 8 feature columns are: Features cylinders: multi-valued discrete displacement: continuous horsepower: continuous weight: continuous acceleration: continuous model year: multi-valued discrete origin: multi-valued discrete car name: string (unique for each instance) Number of samples: 392 Target variable (continuous): mpg","title":"Overview"},{"location":"user_guide/data/autompg_data/#references","text":"Source: https://archive.ics.uci.edu/ml/datasets/Auto+MPG Quinlan, R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann.","title":"References"},{"location":"user_guide/data/autompg_data/#example-dataset-overview","text":"from mlxtend.data import autompg_data X, y = autompg_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('\\nHeader: %s' % ['cylinders', 'displacement', 'horsepower', 'weight', 'acceleration', 'model year', 'origin', 'car name']) print('1st row', X[0]) Dimensions: 392 x 8 Header: ['cylinders', 'displacement', 'horsepower', 'weight', 'acceleration', 'model year', 'origin', 'car name'] 1st row [ 8.00000000e+00 3.07000000e+02 1.30000000e+02 3.50400000e+03 1.20000000e+01 7.00000000e+01 1.00000000e+00 nan] Note that the feature array contains a str column (\"car name\"), thus it is recommended to pick the features as needed and convert them into a float array for further analysis. The example below shows how to get rid of the car name column and cast the NumPy array as a float array. X[:, :-1].astype(float) array([[ 8. , 307. , 130. , ..., 12. , 70. , 1. ], [ 8. , 350. , 165. , ..., 11.5, 70. , 1. ], [ 8. , 318. , 150. , ..., 11. , 70. , 1. ], ..., [ 4. , 135. , 84. , ..., 11.6, 82. , 1. ], [ 4. , 120. , 79. , ..., 18.6, 82. , 1. ], [ 4. , 119. , 82. , ..., 19.4, 82. , 1. ]])","title":"Example - Dataset overview"},{"location":"user_guide/data/autompg_data/#api","text":"autompg_data() Auto MPG dataset. Source : https://archive.ics.uci.edu/ml/datasets/Auto+MPG Number of samples : 392 Continuous target variable : mpg Dataset Attributes: 1) cylinders: multi-valued discrete 2) displacement: continuous 3) horsepower: continuous 4) weight: continuous 5) acceleration: continuous 6) model year: multi-valued discrete 7) origin: multi-valued discrete 8) car name: string (unique for each instance) Returns X, y : [n_samples, n_features], [n_targets] X is the feature matrix with 392 auto samples as rows and 8 feature columns (6 rows with NaNs removed). y is a 1-dimensional array of the target MPG values. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/autompg_data/","title":"API"},{"location":"user_guide/data/boston_housing_data/","text":"Boston Housing Data A function that loads the boston_housing_data dataset into NumPy arrays. from mlxtend.data import boston_housing_data Overview The Boston Housing dataset for regression analysis. Features CRIM: per capita crime rate by town ZN: proportion of residential land zoned for lots over 25,000 sq.ft. 
INDUS: proportion of non-retail business acres per town CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) NOX: nitric oxides concentration (parts per 10 million) RM: average number of rooms per dwelling AGE: proportion of owner-occupied units built prior to 1940 DIS: weighted distances to five Boston employment centres RAD: index of accessibility to radial highways TAX: full-value property-tax rate per $10,000 PTRATIO: pupil-teacher ratio by town B: 1000(Bk - 0.63)^2 where Bk is the proportion of b. by town LSTAT: % lower status of the population Number of samples: 506 Target variable (continuous): MEDV, Median value of owner-occupied homes in $1000's References Source: https://archive.ics.uci.edu/ml/datasets/Housing Harrison, D. and Rubinfeld, D.L. 'Hedonic prices and the demand for clean air', J. Environ. Economics & Management, vol.5, 81-102, 1978. Example 1 - Dataset overview from mlxtend.data import boston_housing_data X, y = boston_housing_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('1st row', X[0]) Dimensions: 506 x 13 1st row [ 6.32000000e-03 1.80000000e+01 2.31000000e+00 0.00000000e+00 5.38000000e-01 6.57500000e+00 6.52000000e+01 4.09000000e+00 1.00000000e+00 2.96000000e+02 1.53000000e+01 3.96900000e+02 4.98000000e+00] API boston_housing_data() Boston Housing dataset. Source : https://archive.ics.uci.edu/ml/datasets/Housing Number of samples : 506 Continuous target variable : MEDV MEDV = Median value of owner-occupied homes in $1000's Dataset Attributes: 1) CRIM per capita crime rate by town 2) ZN proportion of residential land zoned for lots over 25,000 sq.ft. 3) INDUS proportion of non-retail business acres per town 4) CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) 5) NOX nitric oxides concentration (parts per 10 million) 6) RM average number of rooms per dwelling 7) AGE proportion of owner-occupied units built prior to 1940 8) DIS weighted distances to five Boston employment centres 9) RAD index of accessibility to radial highways 10) TAX full-value property-tax rate per $10,000 11) PTRATIO pupil-teacher ratio by town 12) B 1000(Bk - 0.63)^2 where Bk is the prop. of b. by town 13) LSTAT % lower status of the population Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 506 housing samples as rows and 13 feature columns. y is a 1-dimensional array of the continuous target variable MEDV Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/boston_housing_data/","title":"Boston Housing Data"},{"location":"user_guide/data/boston_housing_data/#boston-housing-data","text":"A function that loads the boston_housing_data dataset into NumPy arrays. from mlxtend.data import boston_housing_data","title":"Boston Housing Data"},{"location":"user_guide/data/boston_housing_data/#overview","text":"The Boston Housing dataset for regression analysis. Features CRIM: per capita crime rate by town ZN: proportion of residential land zoned for lots over 25,000 sq.ft. 
INDUS: proportion of non-retail business acres per town CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) NOX: nitric oxides concentration (parts per 10 million) RM: average number of rooms per dwelling AGE: proportion of owner-occupied units built prior to 1940 DIS: weighted distances to five Boston employment centres RAD: index of accessibility to radial highways TAX: full-value property-tax rate per $10,000 PTRATIO: pupil-teacher ratio by town B: 1000(Bk - 0.63)^2 where Bk is the proportion of b. by town LSTAT: % lower status of the population Number of samples: 506 Target variable (continuous): MEDV, Median value of owner-occupied homes in $1000's","title":"Overview"},{"location":"user_guide/data/boston_housing_data/#references","text":"Source: https://archive.ics.uci.edu/ml/datasets/Housing Harrison, D. and Rubinfeld, D.L. 'Hedonic prices and the demand for clean air', J. Environ. Economics & Management, vol.5, 81-102, 1978.","title":"References"},{"location":"user_guide/data/boston_housing_data/#example-1-dataset-overview","text":"from mlxtend.data import boston_housing_data X, y = boston_housing_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('1st row', X[0]) Dimensions: 506 x 13 1st row [ 6.32000000e-03 1.80000000e+01 2.31000000e+00 0.00000000e+00 5.38000000e-01 6.57500000e+00 6.52000000e+01 4.09000000e+00 1.00000000e+00 2.96000000e+02 1.53000000e+01 3.96900000e+02 4.98000000e+00]","title":"Example 1 - Dataset overview"},{"location":"user_guide/data/boston_housing_data/#api","text":"boston_housing_data() Boston Housing dataset. Source : https://archive.ics.uci.edu/ml/datasets/Housing Number of samples : 506 Continuous target variable : MEDV MEDV = Median value of owner-occupied homes in $1000's Dataset Attributes: 1) CRIM per capita crime rate by town 2) ZN proportion of residential land zoned for lots over 25,000 sq.ft. 3) INDUS proportion of non-retail business acres per town 4) CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) 5) NOX nitric oxides concentration (parts per 10 million) 6) RM average number of rooms per dwelling 7) AGE proportion of owner-occupied units built prior to 1940 8) DIS weighted distances to five Boston employment centres 9) RAD index of accessibility to radial highways 10) TAX full-value property-tax rate per $10,000 11) PTRATIO pupil-teacher ratio by town 12) B 1000(Bk - 0.63)^2 where Bk is the prop. of b. by town 13) LSTAT % lower status of the population Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 506 housing samples as rows and 13 feature columns. y is a 1-dimensional array of the continuous target variable MEDV Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/boston_housing_data/","title":"API"},{"location":"user_guide/data/iris_data/","text":"Iris Dataset A function that loads the iris dataset into NumPy arrays. from mlxtend.data import iris_data Overview The Iris dataset for classification. Features Sepal length Sepal width Petal length Petal width Number of samples: 150 Target variable (discrete): {50x Setosa, 50x Versicolor, 50x Virginica} References Source: https://archive.ics.uci.edu/ml/datasets/Iris Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository. Irvine, CA: University of California, School of Information and Computer Science. 
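Since iris_data returns plain NumPy arrays, its output plugs directly into scikit-learn utilities. A minimal sketch ahead of the dataset overview example below (the train_test_split settings here are illustrative additions, not part of the original example):

from mlxtend.data import iris_data
from sklearn.model_selection import train_test_split

X, y = iris_data()
# stratified 70/30 split; each class stays balanced across the two partitions
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=123, stratify=y)
print(X_train.shape, X_test.shape)  # (105, 4) (45, 4)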
Example 1 - Dataset overview from mlxtend.data import iris_data X, y = iris_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('\\nHeader: %s' % ['sepal length', 'sepal width', 'petal length', 'petal width']) print('1st row', X[0]) Dimensions: 150 x 4 Header: ['sepal length', 'sepal width', 'petal length', 'petal width'] 1st row [ 5.1 3.5 1.4 0.2] import numpy as np print('Classes: Setosa, Versicolor, Virginica') print(np.unique(y)) print('Class distribution: %s' % np.bincount(y)) Classes: Setosa, Versicolor, Virginica [0 1 2] Class distribution: [50 50 50] API iris_data() Iris flower dataset. Source : https://archive.ics.uci.edu/ml/datasets/Iris Number of samples : 150 Class labels : {0, 1, 2}, distribution: [50, 50, 50] 0 = setosa, 1 = versicolor, 2 = virginica. Dataset Attributes: 1) sepal length [cm] 2) sepal width [cm] 3) petal length [cm] 4) petal width [cm] Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 150 flower samples as rows, and 4 feature columns sepal length, sepal width, petal length, and petal width. y is a 1-dimensional array of the class labels {0, 1, 2} Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/iris_data/","title":"Iris Dataset"},{"location":"user_guide/data/iris_data/#iris-dataset","text":"A function that loads the iris dataset into NumPy arrays. from mlxtend.data import iris_data","title":"Iris Dataset"},{"location":"user_guide/data/iris_data/#overview","text":"The Iris dataset for classification. Features Sepal length Sepal width Petal length Petal width Number of samples: 150 Target variable (discrete): {50x Setosa, 50x Versicolor, 50x Virginica}","title":"Overview"},{"location":"user_guide/data/iris_data/#references","text":"Source: https://archive.ics.uci.edu/ml/datasets/Iris Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository. Irvine, CA: University of California, School of Information and Computer Science.","title":"References"},{"location":"user_guide/data/iris_data/#example-1-dataset-overview","text":"from mlxtend.data import iris_data X, y = iris_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('\\nHeader: %s' % ['sepal length', 'sepal width', 'petal length', 'petal width']) print('1st row', X[0]) Dimensions: 150 x 4 Header: ['sepal length', 'sepal width', 'petal length', 'petal width'] 1st row [ 5.1 3.5 1.4 0.2] import numpy as np print('Classes: Setosa, Versicolor, Virginica') print(np.unique(y)) print('Class distribution: %s' % np.bincount(y)) Classes: Setosa, Versicolor, Virginica [0 1 2] Class distribution: [50 50 50]","title":"Example 1 - Dataset overview"},{"location":"user_guide/data/iris_data/#api","text":"iris_data() Iris flower dataset. Source : https://archive.ics.uci.edu/ml/datasets/Iris Number of samples : 150 Class labels : {0, 1, 2}, distribution: [50, 50, 50] 0 = setosa, 1 = versicolor, 2 = virginica. Dataset Attributes: 1) sepal length [cm] 2) sepal width [cm] 3) petal length [cm] 4) petal width [cm] Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 150 flower samples as rows, and 4 feature columns sepal length, sepal width, petal length, and petal width. 
y is a 1-dimensional array of the class labels {0, 1, 2} Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/iris_data/","title":"API"},{"location":"user_guide/data/loadlocal_mnist/","text":"Load the MNIST Dataset from Local Files A utility function that loads the MNIST dataset from byte-form into NumPy arrays. from mlxtend.data import loadlocal_mnist Overview The MNIST dataset was constructed from two datasets of the US National Institute of Standards and Technology (NIST). The training set consists of handwritten digits from 250 different people, 50 percent high school students, and 50 percent employees from the Census Bureau. Note that the test set contains handwritten digits from different people following the same split. The MNIST dataset is publicly available at http://yann.lecun.com/exdb/mnist/ and consists of the following four parts: - Training set images: train-images-idx3-ubyte.gz (9.9 MB, 47 MB unzipped, and 60,000 samples) - Training set labels: train-labels-idx1-ubyte.gz (29 KB, 60 KB unzipped, and 60,000 labels) - Test set images: t10k-images-idx3-ubyte.gz (1.6 MB, 7.8 MB unzipped, and 10,000 samples) - Test set labels: t10k-labels-idx1-ubyte.gz (5 KB, 10 KB unzipped, and 10,000 labels) Features Each feature vector (row in the feature matrix) consists of 784 pixels (intensities) -- unrolled from the original 28x28 pixel images. Number of samples: 60,000 training images and 10,000 test images Target variable (discrete): {0, 1, ..., 9}, the handwritten digit class labels References Source: http://yann.lecun.com/exdb/mnist/ Y. LeCun and C. Cortes. Mnist handwritten digit database. AT&T Labs [Online]. Available: http://yann.lecun.com/exdb/mnist, 2010. Example 1 Part 1 - Downloading the MNIST dataset 1) Download the MNIST files from Y. LeCun's website http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz for example, via curl -O http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz 2) Unzip the downloaded gzip archives for example, via gunzip t*-ubyte.gz Example 1 Part 2 - Loading MNIST into NumPy Arrays from mlxtend.data import loadlocal_mnist X, y = loadlocal_mnist( images_path='/Users/Sebastian/Desktop/train-images-idx3-ubyte', labels_path='/Users/Sebastian/Desktop/train-labels-idx1-ubyte') print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('\\n1st row', X[0]) Dimensions: 60000 x 784 1st row [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 18 18 18 126 136 175 26 166 255 247 127 0 0 0 0 0 0 0 0 0 0 0 0 30 36 94 154 170 253 253 253 253 253 225 172 253 242 195 64 0 0 0 0 0 0 0 0 0 0 0 49 238 253 253 253 253 253 253 253 253 251 93 82 82 56 39 0 0 0 0 0 0 0 0 0 0 0 0 18 219 253 253 253 253 253 198 182 247 241 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 80 156 107 253 253 205 11 0 43 154 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 1 154 253 90 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 139 253 190 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 190 253 70 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 35 241 225 160 108 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 81 240 253 253 119 25 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 45 186 253 253 150 27 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 16 93 252 253 187 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 249 253 249 64 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 46 130 183 253 253 207 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 39 148 229 253 253 253 250 182 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 24 114 221 253 253 253 253 201 78 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 23 66 213 253 253 253 253 198 81 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 18 171 219 253 253 253 253 195 80 9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 55 172 226 253 253 253 253 244 133 11 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 136 253 253 253 212 135 132 16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] import numpy as np print('Digits: 0 1 2 3 4 5 6 7 8 9') print('labels: %s' % np.unique(y)) print('Class distribution: %s' % np.bincount(y)) Digits: 0 1 2 3 4 5 6 7 8 9 labels: [0 1 2 3 4 5 6 7 8 9] Class distribution: [5923 6742 5958 6131 5842 5421 5918 6265 5851 5949] Store as CSV Files np.savetxt(fname='/Users/Sebastian/Desktop/images.csv', X=X, delimiter=',', fmt='%d') np.savetxt(fname='/Users/Sebastian/Desktop/labels.csv', X=y, delimiter=',', fmt='%d') API loadlocal_mnist(images_path, labels_path) Read MNIST from ubyte files. Parameters images_path : str path to the test or train MNIST ubyte file labels_path : str path to the test or train MNIST class labels file Returns images : [n_samples, n_pixels] numpy.array Pixel values of the images. labels : [n_samples] numpy array Target class labels Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/loadlocal_mnist/","title":"Load the MNIST Dataset from Local Files"},{"location":"user_guide/data/loadlocal_mnist/#load-the-mnist-dataset-from-local-files","text":"A utility function that loads the MNIST dataset from byte-form into NumPy arrays. from mlxtend.data import loadlocal_mnist","title":"Load the MNIST Dataset from Local Files"},{"location":"user_guide/data/loadlocal_mnist/#overview","text":"The MNIST dataset was constructed from two datasets of the US National Institute of Standards and Technology (NIST). The training set consists of handwritten digits from 250 different people, 50 percent high school students, and 50 percent employees from the Census Bureau. Note that the test set contains handwritten digits from different people following the same split. The MNIST dataset is publicly available at http://yann.lecun.com/exdb/mnist/ and consists of the following four parts: - Training set images: train-images-idx3-ubyte.gz (9.9 MB, 47 MB unzipped, and 60,000 samples) - Training set labels: train-labels-idx1-ubyte.gz (29 KB, 60 KB unzipped, and 60,000 labels) - Test set images: t10k-images-idx3-ubyte.gz (1.6 MB, 7.8 MB unzipped, and 10,000 samples) - Test set labels: t10k-labels-idx1-ubyte.gz (5 KB, 10 KB unzipped, and 10,000 labels) Features Each feature vector (row in the feature matrix) consists of 784 pixels (intensities) -- unrolled from the original 28x28 pixels images. Number of samples: 60,000 training images and 10,000 test images Target variable (discrete): the 10 digit classes {0, 1, ..., 9}","title":"Overview"},{"location":"user_guide/data/loadlocal_mnist/#references","text":"Source: http://yann.lecun.com/exdb/mnist/ Y. LeCun and C. Cortes. Mnist handwritten digit database. AT&T Labs [Online]. Available: http://yann.lecun.com/exdb/mnist, 2010.","title":"References"},{"location":"user_guide/data/loadlocal_mnist/#example-1-part-1-downloading-the-mnist-dataset","text":"1) Download the MNIST files from Y. LeCun's website http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz for example, via curl -O http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz 2) Unzip the downloaded gzip archives for example, via gunzip t*-ubyte.gz","title":"Example 1 Part 1 - Downloading the MNIST dataset"},{"location":"user_guide/data/loadlocal_mnist/#example-1-part-2-loading-mnist-into-numpy-arrays","text":"from mlxtend.data import loadlocal_mnist X, y = loadlocal_mnist( images_path='/Users/Sebastian/Desktop/train-images-idx3-ubyte', labels_path='/Users/Sebastian/Desktop/train-labels-idx1-ubyte') print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('\\n1st row', X[0]) Dimensions: 60000 x 784 1st row [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 18 18 18 126 136 175 26 166 255 247 127 0 0 0 0 0 0 0 0 0 0 0 0 30 36 94 154 170 253 253 253 253 253 225 172 253 242 195 64 0 0 0 0 0 0 0 0 0 0 0 49 238 253 253 253 253 253 253 253 253 251 93 82 82 56 39 0 0 0 0 0 0 0 0 0 0 0 0 18 219 253 253 253 253 253 198 182 247 241 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 80 156 107 253 253 205 11 0 43 154 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 1 154 253 90 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 139 253 190 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 190 253 70 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 35 241 225 160 108 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 81 240 253 253 119 25 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 45 186 253 253 150 27 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 16 93 252 253 187 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 249 253 249 64 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 46 130 183 253 253 207 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 39 148 229 253 253 253 250 182 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 24 114 221 253 253 253 253 201 78 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 23 66 213 253 253 253 253 198 81 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 18 171 219 253 253 253 253 195 80 9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 55 172 226 253 253 253 253 244 133 11 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 136 253 253 253 212 135 132 16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] import numpy as np print('Digits: 0 1 2 3 4 5 6 7 8 9') print('labels: %s' % np.unique(y)) print('Class distribution: %s' % np.bincount(y)) Digits: 0 1 2 3 4 5 6 7 8 9 labels: [0 1 2 3 4 5 6 7 8 9] Class distribution: [5923 6742 5958 6131 5842 5421 5918 6265 5851 5949]","title":"Example 1 Part 2 - Loading MNIST into NumPy Arrays"},{"location":"user_guide/data/loadlocal_mnist/#store-as-csv-files","text":"np.savetxt(fname='/Users/Sebastian/Desktop/images.csv', X=X, delimiter=',', fmt='%d') np.savetxt(fname='/Users/Sebastian/Desktop/labels.csv', X=y, delimiter=',', fmt='%d')","title":"Store as CSV
Files"},{"location":"user_guide/data/loadlocal_mnist/#api","text":"loadlocal_mnist(images_path, labels_path) Read MNIST from ubyte files. Parameters images_path : str path to the test or train MNIST ubyte file labels_path : str path to the test or train MNIST class labels file Returns images : [n_samples, n_pixels] numpy.array Pixel values of the images. labels : [n_samples] numpy array Target class labels Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/loadlocal_mnist/","title":"API"},{"location":"user_guide/data/make_multiplexer_dataset/","text":"Make Multiplexer Dataset Function that creates a dataset generated by a n-bit Boolean multiplexer for evaluating supervised learning algorithms. from mlxtend.data import make_multiplexer_dataset Overview The make_multiplexer_dataset function creates a dataset generated by an n-bit Boolean multiplexer. Such dataset represents a dataset generated by a simple rule, based on the behavior of a electric multiplexer, yet presents a relatively challenging classification problem for supervised learning algorithm with interactions between features (epistasis) as it may be encountered in many real-world scenarios [1]. The following illustration depicts a 6-bit multiplexer that consists of 2 address bits and 4 register bits. The address bits converted to decimal representation point to a position in the register bit. For example, if the address bits are \"00\" (0 in decimal), the address bits point to the register bit at position 0. The value of the register position pointed to determines the class label. For example, if the register bit at position is 0, the class label is 0. Vice versa, if the register bit at position 0 is 1, the class label is 1. In the example above, the address bits \"10\" (2 in decimal) point to the 3rd register position (as we start counting from index 0), which has a bit value of 1. Hence, the class label is 1. Below are a few more examples: Address bits: [0, 1], register bits: [1, 0, 1, 1], class label: 0 Address bits: [0, 1], register bits: [1, 1, 1, 0], class label: 1 Address bits: [1, 0], register bits: [1, 0, 0, 1], class label: 0 Address bits: [1, 1], register bits: [1, 1, 1, 0], class label: 0 Address bits: [0, 1], register bits: [0, 1, 1, 0], class label: 1 Address bits: [0, 1], register bits: [1, 0, 0, 1], class label: 0 Address bits: [0, 1], register bits: [0, 1, 1, 1], class label: 1 Address bits: [0, 1], register bits: [0, 0, 0, 0], class label: 0 Address bits: [1, 0], register bits: [1, 0, 1, 1], class label: 1 Address bits: [0, 1], register bits: [1, 1, 1, 1], class label: 1 Note that in the implementation of the multiplexer function, if the number of address bits is set to 2, this results in a 6 bit multiplexer as two bit can have 2^2=4 different register positions (2 bit + 4 bit = 6 bit). However, if we choose 3 address bits instead, 2^3=8 positions would be covered, resulting in a 11 bit (3 bit + 8 bit = 11 bit) multiplexer, and so forth. References [1] Urbanowicz, R. J., & Browne, W. N. (2017). Introduction to Learning Classifier Systems . Springer. 
Example 1 -- 6-bit multiplexer This simple example illustrates how to create a dataset from a 6-bit multiplexer import numpy as np from mlxtend.data import make_multiplexer_dataset X, y = make_multiplexer_dataset(address_bits=2, sample_size=10, positive_class_ratio=0.5, shuffle=False, random_seed=123) print('Features:\\n', X) print('\\nClass labels:\\n', y) Features: [[0 1 0 1 0 1] [1 0 0 0 1 1] [0 1 1 1 0 0] [0 1 1 1 0 0] [0 0 1 1 0 0] [0 1 0 0 0 0] [0 1 1 0 1 1] [1 0 1 0 0 0] [1 0 0 1 0 1] [1 0 1 0 0 1]] Class labels: [1 1 1 1 1 0 0 0 0 0] API make_multiplexer_dataset(address_bits=2, sample_size=100, positive_class_ratio=0.5, shuffle=False, random_seed=None) Function to create a binary n-bit multiplexer dataset. New in mlxtend v0.9 Parameters address_bits : int (default: 2) A positive integer that determines the number of address bits in the multiplexer, which in turn determines the n-bit capacity of the multiplexer and therefore the number of features. The number of features is determined by the number of address bits. For example, 2 address bits will result in a 6 bit multiplexer and consequently 6 features (2 + 2^2 = 6). If address_bits=3 , then this results in an 11-bit multiplexer (3 + 2^3 = 11) with 11 features. sample_size : int (default: 100) The total number of samples generated. positive_class_ratio : float (default: 0.5) The fraction (a float between 0 and 1) of samples in the dataset of size sample_size that have class label 1. If positive_class_ratio=0.5 (default), then the ratio of class 0 and class 1 samples is perfectly balanced. shuffle : Bool (default: False) Whether or not to shuffle the features and labels. If False (default), the samples are returned in sorted order starting with sample_size /2 samples with class label 0 and followed by sample_size /2 samples with class label 1. random_seed : int (default: None) Random seed used for generating the multiplexer samples and shuffling. Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with the number of samples equal to sample_size . The number of features is determined by the number of address bits. For instance, 2 address bits will result in a 6 bit multiplexer and consequently 6 features (2 + 2^2 = 6). All features are binary (values in {0, 1}). y is a 1-dimensional array of class labels in {0, 1}. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/make_multiplexer_dataset","title":"Make Multiplexer Dataset"},{"location":"user_guide/data/make_multiplexer_dataset/#make-multiplexer-dataset","text":"Function that creates a dataset generated by an n-bit Boolean multiplexer for evaluating supervised learning algorithms. from mlxtend.data import make_multiplexer_dataset","title":"Make Multiplexer Dataset"},{"location":"user_guide/data/make_multiplexer_dataset/#overview","text":"The make_multiplexer_dataset function creates a dataset generated by an n-bit Boolean multiplexer. Such a dataset is generated by a simple rule, based on the behavior of an electric multiplexer, yet it presents a relatively challenging classification problem for supervised learning algorithms, since it involves interactions between features (epistasis) as they may be encountered in many real-world scenarios [1]. The following illustration depicts a 6-bit multiplexer that consists of 2 address bits and 4 register bits. The address bits, converted to their decimal representation, point to a position in the register bits.
For example, if the address bits are \"00\" (0 in decimal), the address bits point to the register bit at position 0. The value of the register position pointed to determines the class label. For example, if the register bit at position 0 is 0, the class label is 0. Conversely, if the register bit at position 0 is 1, the class label is 1. In the example above, the address bits \"10\" (2 in decimal) point to the 3rd register position (as we start counting from index 0), which has a bit value of 1. Hence, the class label is 1. Below are a few more examples: Address bits: [0, 1], register bits: [1, 0, 1, 1], class label: 0 Address bits: [0, 1], register bits: [1, 1, 1, 0], class label: 1 Address bits: [1, 0], register bits: [1, 0, 0, 1], class label: 0 Address bits: [1, 1], register bits: [1, 1, 1, 0], class label: 0 Address bits: [0, 1], register bits: [0, 1, 1, 0], class label: 1 Address bits: [0, 1], register bits: [1, 0, 0, 1], class label: 0 Address bits: [0, 1], register bits: [0, 1, 1, 1], class label: 1 Address bits: [0, 1], register bits: [0, 0, 0, 0], class label: 0 Address bits: [1, 0], register bits: [1, 0, 1, 1], class label: 1 Address bits: [0, 1], register bits: [1, 1, 1, 1], class label: 1 Note that in the implementation of the multiplexer function, if the number of address bits is set to 2, this results in a 6-bit multiplexer, as two bits can address 2^2=4 different register positions (2 address bits + 4 register bits = 6 bits). However, if we choose 3 address bits instead, 2^3=8 positions would be covered, resulting in an 11-bit (3 bits + 8 bits = 11 bits) multiplexer, and so forth.","title":"Overview"},{"location":"user_guide/data/make_multiplexer_dataset/#references","text":"[1] Urbanowicz, R. J., & Browne, W. N. (2017). Introduction to Learning Classifier Systems. Springer.","title":"References"},{"location":"user_guide/data/make_multiplexer_dataset/#example-1-6-bit-multiplexer","text":"This simple example illustrates how to create a dataset from a 6-bit multiplexer import numpy as np from mlxtend.data import make_multiplexer_dataset X, y = make_multiplexer_dataset(address_bits=2, sample_size=10, positive_class_ratio=0.5, shuffle=False, random_seed=123) print('Features:\\n', X) print('\\nClass labels:\\n', y) Features: [[0 1 0 1 0 1] [1 0 0 0 1 1] [0 1 1 1 0 0] [0 1 1 1 0 0] [0 0 1 1 0 0] [0 1 0 0 0 0] [0 1 1 0 1 1] [1 0 1 0 0 0] [1 0 0 1 0 1] [1 0 1 0 0 1]] Class labels: [1 1 1 1 1 0 0 0 0 0]","title":"Example 1 -- 6-bit multiplexer"},{"location":"user_guide/data/make_multiplexer_dataset/#api","text":"make_multiplexer_dataset(address_bits=2, sample_size=100, positive_class_ratio=0.5, shuffle=False, random_seed=None) Function to create a binary n-bit multiplexer dataset. New in mlxtend v0.9 Parameters address_bits : int (default: 2) A positive integer that determines the number of address bits in the multiplexer, which in turn determines the n-bit capacity of the multiplexer and therefore the number of features. The number of features is determined by the number of address bits. For example, 2 address bits will result in a 6 bit multiplexer and consequently 6 features (2 + 2^2 = 6). If address_bits=3 , then this results in an 11-bit multiplexer (3 + 2^3 = 11) with 11 features. sample_size : int (default: 100) The total number of samples generated. positive_class_ratio : float (default: 0.5) The fraction (a float between 0 and 1) of samples in the dataset of size sample_size that have class label 1. If positive_class_ratio=0.5 (default), then the ratio of class 0 and class 1 samples is perfectly balanced.
shuffle : Bool (default: False) Whether or not to shuffle the features and labels. If False (default), the samples are returned in sorted order starting with sample_size /2 samples with class label 0 and followed by sample_size /2 samples with class label 1. random_seed : int (default: None) Random seed used for generating the multiplexer samples and shuffling. Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with the number of samples equal to sample_size . The number of features is determined by the number of address bits. For instance, 2 address bits will result in a 6 bit multiplexer and consequently 6 features (2 + 2^2 = 6). All features are binary (values in {0, 1}). y is a 1-dimensional array of class labels in {0, 1}. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/make_multiplexer_dataset","title":"API"},{"location":"user_guide/data/mnist_data/","text":"MNIST Dataset A function that loads the MNIST dataset into NumPy arrays. from mlxtend.data import mnist_data Overview The MNIST dataset was constructed from two datasets of the US National Institute of Standards and Technology (NIST). The training set consists of handwritten digits from 250 different people, 50 percent high school students, and 50 percent employees from the Census Bureau. Note that the test set contains handwritten digits from different people following the same split. Features Each feature vector (row in the feature matrix) consists of 784 pixels (intensities) -- unrolled from the original 28x28 pixels images. Number of samples: A subset of 5000 images (the first 500 digits of each class) Target variable (discrete): {500x 0, ..., 500x 9} References Source: http://yann.lecun.com/exdb/mnist/ Y. LeCun and C. Cortes. Mnist handwritten digit database. AT&T Labs [Online]. Available: http://yann.lecun.com/exdb/mnist , 2010. Example 1 - Dataset overview from mlxtend.data import mnist_data X, y = mnist_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('1st row', X[0]) Dimensions: 5000 x 784 1st row [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 51. 159. 253. 159. 50. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 48. 238. 252. 252. 252. 237. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 54. 227. 253. 252. 239. 233. 252. 57. 6. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 10. 60. 224. 252. 253. 252. 202. 84. 252. 253. 122. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 163. 252. 252. 252. 253. 252. 252. 96. 189. 253. 167. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 51. 238. 253. 253. 190. 114. 253. 228. 47. 79. 255. 168. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 48. 238. 252. 252. 179. 12. 75. 121. 21. 0. 0. 253. 243. 50. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 38. 165. 253. 233. 208. 84. 0. 0. 0. 0. 0. 0. 253. 252. 165. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 7. 178. 252. 240. 71. 19. 28. 0. 0. 0. 0. 0. 0. 253. 252. 195. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 57. 252. 252. 63. 0. 0. 0. 0. 0. 0. 0. 0. 0. 253. 252. 195. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 198. 253. 190. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 255. 253. 196. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 76. 246. 252. 
112. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 253. 252. 148. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 230. 25. 0. 0. 0. 0. 0. 0. 0. 0. 7. 135. 253. 186. 12. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 223. 0. 0. 0. 0. 0. 0. 0. 0. 7. 131. 252. 225. 71. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 145. 0. 0. 0. 0. 0. 0. 0. 48. 165. 252. 173. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 86. 253. 225. 0. 0. 0. 0. 0. 0. 114. 238. 253. 162. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 249. 146. 48. 29. 85. 178. 225. 253. 223. 167. 56. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 252. 252. 229. 215. 252. 252. 252. 196. 130. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 28. 199. 252. 252. 253. 252. 252. 233. 145. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 25. 128. 252. 253. 252. 141. 37. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] import numpy as np print('Classes: digits 0-9') print(np.unique(y)) print('Class distribution: %s' % np.bincount(y)) Classes: digits 0-9 [0 1 2 3 4 5 6 7 8 9] Class distribution: [500 500 500 500 500 500 500 500 500 500] Example 2 - Visualize MNIST %matplotlib inline import matplotlib.pyplot as plt def plot_digit(X, y, idx): img = X[idx].reshape(28,28) plt.imshow(img, cmap='Greys', interpolation='nearest') plt.title('true label: %d' % y[idx]) plt.show() plot_digit(X, y, 4) API mnist_data() 5000 samples from the MNIST handwritten digits dataset. Data Source : http://yann.lecun.com/exdb/mnist/ Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 5000 image samples as rows, each row consists of 28x28 pixels that were unrolled into 784 pixel feature vectors. y contains the 10 unique class labels 0-9. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/mnist_data/","title":"MNIST Dataset"},{"location":"user_guide/data/mnist_data/#mnist-dataset","text":"A function that loads the MNIST dataset into NumPy arrays. from mlxtend.data import mnist_data","title":"MNIST Dataset"},{"location":"user_guide/data/mnist_data/#overview","text":"The MNIST dataset was constructed from two datasets of the US National Institute of Standards and Technology (NIST). The training set consists of handwritten digits from 250 different people, 50 percent high school students, and 50 percent employees from the Census Bureau. Note that the test set contains handwritten digits from different people following the same split. Features Each feature vector (row in the feature matrix) consists of 784 pixels (intensities) -- unrolled from the original 28x28 pixels images. Number of samples: A subset of 5000 images (the first 500 digits of each class) Target variable (discrete): {500x 0, ..., 500x 9}","title":"Overview"},{"location":"user_guide/data/mnist_data/#references","text":"Source: http://yann.lecun.com/exdb/mnist/ Y. LeCun and C. Cortes. Mnist handwritten digit database. AT&T Labs [Online].
Available: http://yann.lecun.com/exdb/mnist , 2010.","title":"References"},{"location":"user_guide/data/mnist_data/#example-1-dataset-overview","text":"from mlxtend.data import mnist_data X, y = mnist_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('1st row', X[0]) Dimensions: 5000 x 784 1st row [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 51. 159. 253. 159. 50. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 48. 238. 252. 252. 252. 237. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 54. 227. 253. 252. 239. 233. 252. 57. 6. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 10. 60. 224. 252. 253. 252. 202. 84. 252. 253. 122. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 163. 252. 252. 252. 253. 252. 252. 96. 189. 253. 167. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 51. 238. 253. 253. 190. 114. 253. 228. 47. 79. 255. 168. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 48. 238. 252. 252. 179. 12. 75. 121. 21. 0. 0. 253. 243. 50. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 38. 165. 253. 233. 208. 84. 0. 0. 0. 0. 0. 0. 253. 252. 165. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 7. 178. 252. 240. 71. 19. 28. 0. 0. 0. 0. 0. 0. 253. 252. 195. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 57. 252. 252. 63. 0. 0. 0. 0. 0. 0. 0. 0. 0. 253. 252. 195. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 198. 253. 190. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 255. 253. 196. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 76. 246. 252. 112. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 253. 252. 148. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 230. 25. 0. 0. 0. 0. 0. 0. 0. 0. 7. 135. 253. 186. 12. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 223. 0. 0. 0. 0. 0. 0. 0. 0. 7. 131. 252. 225. 71. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 145. 0. 0. 0. 0. 0. 0. 0. 48. 165. 252. 173. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 86. 253. 225. 0. 0. 0. 0. 0. 0. 114. 238. 253. 162. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 249. 146. 48. 29. 85. 178. 225. 253. 223. 167. 56. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 85. 252. 252. 252. 229. 215. 252. 252. 252. 196. 130. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 28. 199. 252. 252. 253. 252. 252. 233. 145. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 25. 128. 252. 253. 252. 141. 37. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] 
import numpy as np print('Classes: digits 0-9') print(np.unique(y)) print('Class distribution: %s' % np.bincount(y)) Classes: digits 0-9 [0 1 2 3 4 5 6 7 8 9] Class distribution: [500 500 500 500 500 500 500 500 500 500]","title":"Example 1 - Dataset overview"},{"location":"user_guide/data/mnist_data/#example-2-visualize-mnist","text":"%matplotlib inline import matplotlib.pyplot as plt def plot_digit(X, y, idx): img = X[idx].reshape(28,28) plt.imshow(img, cmap='Greys', interpolation='nearest') plt.title('true label: %d' % y[idx]) plt.show() plot_digit(X, y, 4)","title":"Example 2 - Visualize MNIST"},{"location":"user_guide/data/mnist_data/#api","text":"mnist_data() 5000 samples from the MNIST handwritten digits dataset. Data Source : http://yann.lecun.com/exdb/mnist/ Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 5000 image samples as rows, each row consists of 28x28 pixels that were unrolled into 784 pixel feature vectors. y contains the 10 unique class labels 0-9. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/mnist_data/","title":"API"},{"location":"user_guide/data/three_blobs_data/","text":"Three Blobs Dataset A function that loads the three_blobs dataset into NumPy arrays. from mlxtend.data import three_blobs_data Overview A random dataset of 3 2D blobs for clustering. Number of samples : 150 Suggested labels \in {0, 1, 2}, distribution: [50, 50, 50] References Example 1 - Dataset overview from mlxtend.data import three_blobs_data X, y = three_blobs_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('1st row', X[0]) Dimensions: 150 x 2 1st row [ 2.60509732 1.22529553] import numpy as np print('Suggested cluster labels') print(np.unique(y)) print('Label distribution: %s' % np.bincount(y)) Suggested cluster labels [0 1 2] Label distribution: [50 50 50] import matplotlib.pyplot as plt plt.scatter(X[:,0], X[:,1], c='white', marker='o', s=50) plt.grid() plt.show() plt.scatter(X[y == 0, 0], X[y == 0, 1], s=50, c='lightgreen', marker='s', label='cluster 1') plt.scatter(X[y == 1,0], X[y == 1,1], s=50, c='orange', marker='o', label='cluster 2') plt.scatter(X[y == 2,0], X[y == 2,1], s=50, c='lightblue', marker='v', label='cluster 3') plt.legend(loc='lower left') plt.grid() plt.show() API three_blobs_data() A random dataset of 3 2D blobs for clustering. Number of samples : 150 Suggested labels : {0, 1, 2}, distribution: [50, 50, 50] Returns X, y : [n_samples, n_features], [n_cluster_labels] X is the feature matrix with 150 samples as rows and 2 feature columns. y is a 1-dimensional array of the 3 suggested cluster labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/three_blobs_data","title":"Three Blobs Dataset"},{"location":"user_guide/data/three_blobs_data/#three-blobs-dataset","text":"A function that loads the three_blobs dataset into NumPy arrays. from mlxtend.data import three_blobs_data","title":"Three Blobs Dataset"},{"location":"user_guide/data/three_blobs_data/#overview","text":"A random dataset of 3 2D blobs for clustering.
Number of samples : 150 Suggested labels \in {0, 1, 2}, distribution: [50, 50, 50]","title":"Overview"},{"location":"user_guide/data/three_blobs_data/#references","text":"","title":"References"},{"location":"user_guide/data/three_blobs_data/#example-1-dataset-overview","text":"from mlxtend.data import three_blobs_data X, y = three_blobs_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('1st row', X[0]) Dimensions: 150 x 2 1st row [ 2.60509732 1.22529553] import numpy as np print('Suggested cluster labels') print(np.unique(y)) print('Label distribution: %s' % np.bincount(y)) Suggested cluster labels [0 1 2] Label distribution: [50 50 50] import matplotlib.pyplot as plt plt.scatter(X[:,0], X[:,1], c='white', marker='o', s=50) plt.grid() plt.show() plt.scatter(X[y == 0, 0], X[y == 0, 1], s=50, c='lightgreen', marker='s', label='cluster 1') plt.scatter(X[y == 1,0], X[y == 1,1], s=50, c='orange', marker='o', label='cluster 2') plt.scatter(X[y == 2,0], X[y == 2,1], s=50, c='lightblue', marker='v', label='cluster 3') plt.legend(loc='lower left') plt.grid() plt.show()","title":"Example 1 - Dataset overview"},{"location":"user_guide/data/three_blobs_data/#api","text":"three_blobs_data() A random dataset of 3 2D blobs for clustering. Number of samples : 150 Suggested labels : {0, 1, 2}, distribution: [50, 50, 50] Returns X, y : [n_samples, n_features], [n_cluster_labels] X is the feature matrix with 150 samples as rows and 2 feature columns. y is a 1-dimensional array of the 3 suggested cluster labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/three_blobs_data","title":"API"},{"location":"user_guide/data/wine_data/","text":"Wine Dataset A function that loads the Wine dataset into NumPy arrays. from mlxtend.data import wine_data Overview The Wine dataset for classification. Samples 178 Features 13 Classes 3 Data Set Characteristics: Multivariate Attribute Characteristics: Integer, Real Associated Tasks: Classification Missing Values None column attribute 1) Class Label 2) Alcohol 3) Malic acid 4) Ash 5) Alcalinity of ash 6) Magnesium 7) Total phenols 8) Flavanoids 9) Nonflavanoid phenols 10) Proanthocyanins 11) Color intensity 12) Hue 13) OD280/OD315 of diluted wines 14) Proline class samples 0 59 1 71 2 48 References Forina, M. et al, PARVUS - An Extendible Package for Data Exploration, Classification and Correlation. Institute of Pharmaceutical and Food Analysis and Technologies, Via Brigata Salerno, 16147 Genoa, Italy. Source: https://archive.ics.uci.edu/ml/datasets/Wine Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository.
Example 1 - Dataset overview from mlxtend.data import wine_data X, y = wine_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('\\nHeader: %s' % ['alcohol', 'malic acid', 'ash', 'ash alcalinity', 'magnesium', 'total phenols', 'flavanoids', 'nonflavanoid phenols', 'proanthocyanins', 'color intensity', 'hue', 'OD280/OD315 of diluted wines', 'proline']) print('1st row', X[0]) Dimensions: 178 x 13 Header: ['alcohol', 'malic acid', 'ash', 'ash alcalinity', 'magnesium', 'total phenols', 'flavanoids', 'nonflavanoid phenols', 'proanthocyanins', 'color intensity', 'hue', 'OD280/OD315 of diluted wines', 'proline'] 1st row [ 1.42300000e+01 1.71000000e+00 2.43000000e+00 1.56000000e+01 1.27000000e+02 2.80000000e+00 3.06000000e+00 2.80000000e-01 2.29000000e+00 5.64000000e+00 1.04000000e+00 3.92000000e+00 1.06500000e+03] import numpy as np print('Classes: %s' % np.unique(y)) print('Class distribution: %s' % np.bincount(y)) Classes: [0 1 2] Class distribution: [59 71 48] API wine_data() Wine dataset. Source : https://archive.ics.uci.edu/ml/datasets/Wine Number of samples : 178 Class labels : {0, 1, 2}, distribution: [59, 71, 48] Dataset Attributes: 1) Alcohol 2) Malic acid 3) Ash 4) Alcalinity of ash 5) Magnesium 6) Total phenols 7) Flavanoids 8) Nonflavanoid phenols 9) Proanthocyanins 10) Color intensity 11) Hue 12) OD280/OD315 of diluted wines 13) Proline Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 178 wine samples as rows and 13 feature columns. y is a 1-dimensional array of the 3 class labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/wine_data","title":"Wine Dataset"},{"location":"user_guide/data/wine_data/#wine-dataset","text":"A function that loads the Wine dataset into NumPy arrays. from mlxtend.data import wine_data","title":"Wine Dataset"},{"location":"user_guide/data/wine_data/#overview","text":"The Wine dataset for classification. Samples 178 Features 13 Classes 3 Data Set Characteristics: Multivariate Attribute Characteristics: Integer, Real Associated Tasks: Classification Missing Values None column attribute 1) Class Label 2) Alcohol 3) Malic acid 4) Ash 5) Alcalinity of ash 6) Magnesium 7) Total phenols 8) Flavanoids 9) Nonflavanoid phenols 10) Proanthocyanins 11) Color intensity 12) Hue 13) OD280/OD315 of diluted wines 14) Proline class samples 0 59 1 71 2 48","title":"Overview"},{"location":"user_guide/data/wine_data/#references","text":"Forina, M. et al, PARVUS - An Extendible Package for Data Exploration, Classification and Correlation. Institute of Pharmaceutical and Food Analysis and Technologies, Via Brigata Salerno, 16147 Genoa, Italy. Source: https://archive.ics.uci.edu/ml/datasets/Wine Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository. 
Irvine, CA: University of California, School of Information and Computer Science.","title":"References"},{"location":"user_guide/data/wine_data/#example-1-dataset-overview","text":"from mlxtend.data import wine_data X, y = wine_data() print('Dimensions: %s x %s' % (X.shape[0], X.shape[1])) print('\\nHeader: %s' % ['alcohol', 'malic acid', 'ash', 'ash alcalinity', 'magnesium', 'total phenols', 'flavanoids', 'nonflavanoid phenols', 'proanthocyanins', 'color intensity', 'hue', 'OD280/OD315 of diluted wines', 'proline']) print('1st row', X[0]) Dimensions: 178 x 13 Header: ['alcohol', 'malic acid', 'ash', 'ash alcalinity', 'magnesium', 'total phenols', 'flavanoids', 'nonflavanoid phenols', 'proanthocyanins', 'color intensity', 'hue', 'OD280/OD315 of diluted wines', 'proline'] 1st row [ 1.42300000e+01 1.71000000e+00 2.43000000e+00 1.56000000e+01 1.27000000e+02 2.80000000e+00 3.06000000e+00 2.80000000e-01 2.29000000e+00 5.64000000e+00 1.04000000e+00 3.92000000e+00 1.06500000e+03] import numpy as np print('Classes: %s' % np.unique(y)) print('Class distribution: %s' % np.bincount(y)) Classes: [0 1 2] Class distribution: [59 71 48]","title":"Example 1 - Dataset overview"},{"location":"user_guide/data/wine_data/#api","text":"wine_data() Wine dataset. Source : https://archive.ics.uci.edu/ml/datasets/Wine Number of samples : 178 Class labels : {0, 1, 2}, distribution: [59, 71, 48] Dataset Attributes: 1) Alcohol 2) Malic acid 3) Ash 4) Alcalinity of ash 5) Magnesium 6) Total phenols 7) Flavanoids 8) Nonflavanoid phenols 9) Proanthocyanins 10) Color intensity 11) Hue 12) OD280/OD315 of diluted wines 13) Proline Returns X, y : [n_samples, n_features], [n_class_labels] X is the feature matrix with 178 wine samples as rows and 13 feature columns. y is a 1-dimensional array of the 3 class labels 0, 1, 2 Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/data/wine_data","title":"API"},{"location":"user_guide/evaluate/BootstrapOutOfBag/","text":"BootstrapOutOfBag An implementation of the out-of-bag bootstrap to evaluate supervised learning algorithms. from mlxtend.evaluate import BootstrapOutOfBag Overview Originally, the bootstrap method aims to determine the statistical properties of an estimator when the underlying distribution is unknown and additional samples are not available. Now, in order to exploit this method for the evaluation of predictive models, such as hypotheses for classification and regression, we may prefer a slightly different approach to bootstrapping using the so-called Out-Of-Bag (OOB) or Leave-One-Out Bootstrap (LOOB) technique. Here, we use out-of-bag samples as test sets for evaluation instead of evaluating the model on the training data. Out-of-bag samples are the unique sets of instances that are not used for model fitting as shown in the figure below [1]. The figure above illustrates how three random bootstrap samples drawn from an exemplary ten-sample dataset ( X_1, X_2, ..., X_{10} ) and their corresponding out-of-bag samples for testing may look. In practice, Bradley Efron and Robert Tibshirani recommend drawing 50 to 200 bootstrap samples as being sufficient for reliable estimates [2]. References [1] https://sebastianraschka.com/blog/2016/model-evaluation-selection-part2.html [2] Efron, Bradley, and Robert J. Tibshirani. An introduction to the bootstrap. CRC press, 1994.
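To illustrate the resampling logic, here is a minimal sketch of a single out-of-bag round (a hedged, plain-NumPy illustration of the idea only, not mlxtend's implementation):

import numpy as np

rng = np.random.RandomState(123)
n = 10
boot_idx = rng.randint(0, n, size=n)            # draw n indices with replacement
oob_idx = np.setdiff1d(np.arange(n), boot_idx)  # indices never drawn form the test set
print('bootstrap sample:', boot_idx)
print('out-of-bag test set:', oob_idx)

In each round, the instances that never appear in the bootstrap sample serve as that round's out-of-bag test set.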
Example 1 -- Evaluating the predictive performance of a model The BootstrapOutOfBag class mimics the behavior of scikit-learn's cross-validation classes, e.g., KFold : from mlxtend.evaluate import BootstrapOutOfBag import numpy as np oob = BootstrapOutOfBag(n_splits=3) for train, test in oob.split(np.array([1, 2, 3, 4, 5])): print(train, test) [4 2 1 3 3] [0] [2 4 1 2 1] [0 3] [4 3 3 4 1] [0 2] Consequently, we can use BootstrapOutOfBag objects via the cross_val_score function: from sklearn.datasets import load_iris from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score iris = load_iris() X = iris.data y = iris.target lr = LogisticRegression() print(cross_val_score(lr, X, y)) [ 0.96078431 0.92156863 0.95833333] print(cross_val_score(lr, X, y, cv=BootstrapOutOfBag(n_splits=3, random_seed=456))) [ 0.92727273 0.96226415 0.94444444] In practice, it is recommended to run at least 200 iterations, though: print('Mean accuracy: %.1f%%' % np.mean(100*cross_val_score( lr, X, y, cv=BootstrapOutOfBag(n_splits=200, random_seed=456)))) Mean accuracy: 94.8% Using the bootstrap, we can use the percentile method to compute the confidence bounds of the performance estimate. We pick our lower and upper confidence bounds as follows: ACC_{lower} = \\alpha_1th percentile of the ACC_{boot} distribution ACC_{upper} = \\alpha_2th percentile of the ACC_{boot} distribution where \\alpha_1 = \\alpha and \\alpha_2 = 1-\\alpha , so that we obtain the 100 \\times (1-2 \\times \\alpha) percent confidence interval. For instance, to compute a 95% confidence interval, we pick \\alpha=0.025 to obtain the 2.5th and 97.5th percentiles of the b bootstrap samples distribution as the lower and upper confidence bounds. import matplotlib.pyplot as plt %matplotlib inline accuracies = cross_val_score(lr, X, y, cv=BootstrapOutOfBag(n_splits=1000, random_seed=456)) mean = np.mean(accuracies) lower = np.percentile(accuracies, 2.5) upper = np.percentile(accuracies, 97.5) fig, ax = plt.subplots(figsize=(8, 4)) ax.vlines(mean, [0], 40, lw=2.5, linestyle='-', label='mean') ax.vlines(lower, [0], 15, lw=2.5, linestyle='-.', label='CI95 percentile') ax.vlines(upper, [0], 15, lw=2.5, linestyle='-.') ax.hist(accuracies, bins=11, color='#0080ff', edgecolor=\"none\", alpha=0.3) plt.legend(loc='upper left') plt.show() API BootstrapOutOfBag(n_splits=200, random_seed=None) Parameters n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. Returns train_idx : ndarray The training set indices for that split. test_idx : ndarray The testing set indices for that split. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/BootstrapOutOfBag/ Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility with scikit-learn. y : object Always ignored, exists for compatibility with scikit-learn. groups : object Always ignored, exists for compatibility with scikit-learn. Returns n_splits : int Returns the number of splitting iterations in the cross-validator. split(X, y=None, groups=None) y : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn.
groups : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn.","title":"BootstrapOutOfBag"},{"location":"user_guide/evaluate/BootstrapOutOfBag/#bootstrapoutofbag","text":"An implementation of the out-of-bag bootstrap to evaluate supervised learning algorithms. from mlxtend.evaluate import BootstrapOutOfBag","title":"BootstrapOutOfBag"},{"location":"user_guide/evaluate/BootstrapOutOfBag/#overview","text":"Originally, the bootstrap method aims to determine the statistical properties of an estimator when the underlying distribution is unknown and additional samples are not available. Now, in order to exploit this method for the evaluation of predictive models, such as hypotheses for classification and regression, we may prefer a slightly different approach to bootstrapping using the so-called Out-Of-Bag (OOB) or Leave-One-Out Bootstrap (LOOB) technique. Here, we use out-of-bag samples as test sets for evaluation instead of evaluating the model on the training data. Out-of-bag samples are the unique sets of instances that are not used for model fitting as shown in the figure below [1]. The figure above illustrates how three random bootstrap samples drawn from an exemplary ten-sample dataset ( X_1, X_2, ..., X_{10} ) and their corresponding out-of-bag samples for testing may look. In practice, Bradley Efron and Robert Tibshirani recommend drawing 50 to 200 bootstrap samples as being sufficient for reliable estimates [2].","title":"Overview"},{"location":"user_guide/evaluate/BootstrapOutOfBag/#references","text":"[1] https://sebastianraschka.com/blog/2016/model-evaluation-selection-part2.html [2] Efron, Bradley, and Robert J. Tibshirani. An introduction to the bootstrap. CRC press, 1994.","title":"References"},{"location":"user_guide/evaluate/BootstrapOutOfBag/#example-1-evaluating-the-predictive-performance-of-a-model","text":"The BootstrapOutOfBag class mimics the behavior of scikit-learn's cross-validation classes, e.g., KFold : from mlxtend.evaluate import BootstrapOutOfBag import numpy as np oob = BootstrapOutOfBag(n_splits=3) for train, test in oob.split(np.array([1, 2, 3, 4, 5])): print(train, test) [4 2 1 3 3] [0] [2 4 1 2 1] [0 3] [4 3 3 4 1] [0 2] Consequently, we can use BootstrapOutOfBag objects via the cross_val_score function: from sklearn.datasets import load_iris from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score iris = load_iris() X = iris.data y = iris.target lr = LogisticRegression() print(cross_val_score(lr, X, y)) [ 0.96078431 0.92156863 0.95833333] print(cross_val_score(lr, X, y, cv=BootstrapOutOfBag(n_splits=3, random_seed=456))) [ 0.92727273 0.96226415 0.94444444] In practice, it is recommended to run at least 200 iterations, though: print('Mean accuracy: %.1f%%' % np.mean(100*cross_val_score( lr, X, y, cv=BootstrapOutOfBag(n_splits=200, random_seed=456)))) Mean accuracy: 94.8% Using the bootstrap, we can use the percentile method to compute the confidence bounds of the performance estimate. We pick our lower and upper confidence bounds as follows: ACC_{lower} = \\alpha_1th percentile of the ACC_{boot} distribution ACC_{upper} = \\alpha_2th percentile of the ACC_{boot} distribution where \\alpha_1 = \\alpha and \\alpha_2 = 1-\\alpha , so that we obtain the 100 \\times (1-2 \\times \\alpha) percent confidence interval.
For instance, to compute a 95% confidence interval, we pick \\alpha=0.025 to obtain the 2.5th and 97.5th percentiles of the b bootstrap samples distribution as the lower and upper confidence bounds. import matplotlib.pyplot as plt %matplotlib inline accuracies = cross_val_score(lr, X, y, cv=BootstrapOutOfBag(n_splits=1000, random_seed=456)) mean = np.mean(accuracies) lower = np.percentile(accuracies, 2.5) upper = np.percentile(accuracies, 97.5) fig, ax = plt.subplots(figsize=(8, 4)) ax.vlines(mean, [0], 40, lw=2.5, linestyle='-', label='mean') ax.vlines(lower, [0], 15, lw=2.5, linestyle='-.', label='CI95 percentile') ax.vlines(upper, [0], 15, lw=2.5, linestyle='-.') ax.hist(accuracies, bins=11, color='#0080ff', edgecolor=\"none\", alpha=0.3) plt.legend(loc='upper left') plt.show()","title":"Example 1 -- Evaluating the predictive performance of a model"},{"location":"user_guide/evaluate/BootstrapOutOfBag/#api","text":"BootstrapOutOfBag(n_splits=200, random_seed=None) Parameters n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. Returns train_idx : ndarray The training set indices for that split. test_idx : ndarray The testing set indices for that split. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/BootstrapOutOfBag/","title":"API"},{"location":"user_guide/evaluate/BootstrapOutOfBag/#methods","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility with scikit-learn. y : object Always ignored, exists for compatibility with scikit-learn. groups : object Always ignored, exists for compatibility with scikit-learn. Returns n_splits : int Returns the number of splitting iterations in the cross-validator. split(X, y=None, groups=None) y : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn. groups : array-like or None (default: None) Argument is not used and only included as parameter for compatibility, similar to KFold in scikit-learn.","title":"Methods"},{"location":"user_guide/evaluate/PredefinedHoldoutSplit/","text":"PredefinedHoldoutSplit Split a dataset into a train and validation subset for validation based on user-specified indices. from mlxtend.evaluate import PredefinedHoldoutSplit Overview The PredefinedHoldoutSplit class serves as an alternative to scikit-learn's KFold class, where the PredefinedHoldoutSplit class splits a dataset into a training and a validation subset without rotation, based on validation indices specified by the user. The PredefinedHoldoutSplit can be used as an argument for cv parameters in scikit-learn's GridSearchCV etc. For performing a random split, see the related RandomHoldoutSplit class.
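Because PredefinedHoldoutSplit follows the scikit-learn splitter interface, it can also be passed directly to cross_val_score, as in the hedged sketch below (the validation indices [0, 1, 99] are arbitrary and chosen only for illustration):

from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from mlxtend.evaluate import PredefinedHoldoutSplit
from mlxtend.data import iris_data

X, y = iris_data()
# A single score is returned, since the splitter yields exactly one split:
print(cross_val_score(KNeighborsClassifier(), X, y,
                      cv=PredefinedHoldoutSplit(valid_indices=[0, 1, 99])))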
Example 1 -- Iterating Over a PredefinedHoldoutSplit from mlxtend.evaluate import PredefinedHoldoutSplit from mlxtend.data import iris_data X, y = iris_data() h_iter = PredefinedHoldoutSplit(valid_indices=[0, 1, 99]) cnt = 0 for train_ind, valid_ind in h_iter.split(X, y): cnt += 1 print(cnt) 1 print(train_ind[:5]) print(valid_ind[:5]) [2 3 4 5 6] [ 0 1 99] Example 2 -- PredefinedHoldoutSplit in GridSearch from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KNeighborsClassifier from mlxtend.evaluate import PredefinedHoldoutSplit from mlxtend.data import iris_data X, y = iris_data() params = {'n_neighbors': [1, 2, 3, 4, 5]} grid = GridSearchCV(KNeighborsClassifier(), param_grid=params, cv=PredefinedHoldoutSplit(valid_indices=[0, 1, 99])) grid.fit(X, y) assert grid.n_splits_ == 1 print(grid.grid_scores_) [mean: 1.00000, std: 0.00000, params: {'n_neighbors': 1}, mean: 1.00000, std: 0.00000, params: {'n_neighbors': 2}, mean: 1.00000, std: 0.00000, params: {'n_neighbors': 3}, mean: 1.00000, std: 0.00000, params: {'n_neighbors': 4}, mean: 1.00000, std: 0.00000, params: {'n_neighbors': 5}] /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/model_selection/_search.py:762: DeprecationWarning: The grid_scores_ attribute was deprecated in version 0.18 in favor of the more elaborate cv_results_ attribute. The grid_scores_ attribute will not be available from 0.20 DeprecationWarning) API PredefinedHoldoutSplit(valid_indices) Train/Validation set splitter for sklearn's GridSearchCV etc. Uses user-specified train/validation set indices to split a dataset into train/validation sets using user-defined or random indices. Parameters valid_indices : array-like, shape (num_examples,) Indices of the training examples in the training set to be used for validation. All other indices in the training set are used for the training subset for model fitting. Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.","title":"PredefinedHoldoutSplit"},{"location":"user_guide/evaluate/PredefinedHoldoutSplit/#predefinedholdoutsplit","text":"Split a dataset into a train and validation subset for validation based on user-specified indices. from mlxtend.evaluate import PredefinedHoldoutSplit","title":"PredefinedHoldoutSplit"},{"location":"user_guide/evaluate/PredefinedHoldoutSplit/#overview","text":"The PredefinedHoldoutSplit class serves as an alternative to scikit-learn's KFold class, where the PredefinedHoldoutSplit class splits a dataset into a training and a validation subset without rotation, based on validation indices specified by the user.
The PredefinedHoldoutSplit can be used as an argument for cv parameters in scikit-learn's GridSearchCV etc. For performing a random split, see the related RandomHoldoutSplit class.","title":"Overview"},{"location":"user_guide/evaluate/PredefinedHoldoutSplit/#example-1-iterating-over-a-predefinedholdoutsplit","text":"from mlxtend.evaluate import PredefinedHoldoutSplit from mlxtend.data import iris_data X, y = iris_data() h_iter = PredefinedHoldoutSplit(valid_indices=[0, 1, 99]) cnt = 0 for train_ind, valid_ind in h_iter.split(X, y): cnt += 1 print(cnt) 1 print(train_ind[:5]) print(valid_ind[:5]) [2 3 4 5 6] [ 0 1 99]","title":"Example 1 -- Iterating Over a PredefinedHoldoutSplit"},{"location":"user_guide/evaluate/PredefinedHoldoutSplit/#example-2-predefinedholdoutsplit-in-gridsearch","text":"from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KNeighborsClassifier from mlxtend.evaluate import PredefinedHoldoutSplit from mlxtend.data import iris_data X, y = iris_data() params = {'n_neighbors': [1, 2, 3, 4, 5]} grid = GridSearchCV(KNeighborsClassifier(), param_grid=params, cv=PredefinedHoldoutSplit(valid_indices=[0, 1, 99])) grid.fit(X, y) assert grid.n_splits_ == 1 print(grid.grid_scores_) [mean: 1.00000, std: 0.00000, params: {'n_neighbors': 1}, mean: 1.00000, std: 0.00000, params: {'n_neighbors': 2}, mean: 1.00000, std: 0.00000, params: {'n_neighbors': 3}, mean: 1.00000, std: 0.00000, params: {'n_neighbors': 4}, mean: 1.00000, std: 0.00000, params: {'n_neighbors': 5}] /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/model_selection/_search.py:762: DeprecationWarning: The grid_scores_ attribute was deprecated in version 0.18 in favor of the more elaborate cv_results_ attribute. The grid_scores_ attribute will not be available from 0.20 DeprecationWarning)","title":"Example 2 -- PredefinedHoldoutSplit in GridSearch"},{"location":"user_guide/evaluate/PredefinedHoldoutSplit/#api","text":"PredefinedHoldoutSplit(valid_indices) Train/Validation set splitter for sklearn's GridSearchCV etc. Uses user-specified train/validation set indices to split a dataset into train/validation sets using user-defined or random indices. Parameters valid_indices : array-like, shape (num_examples,) Indices of the training examples in the training set to be used for validation. All other indices in the training set are used for the training subset for model fitting.","title":"API"},{"location":"user_guide/evaluate/PredefinedHoldoutSplit/#methods","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split.
valid_index : ndarray The validation set indices for that split.","title":"Methods"},{"location":"user_guide/evaluate/RandomHoldoutSplit/","text":"RandomHoldoutSplit Randomly split a dataset into a train and validation subset for validation. from mlxtend.evaluate import RandomHoldoutSplit Overview The RandomHoldoutSplit class serves as an alternative to scikit-learn's KFold class, where the RandomHoldoutSplit class splits a dataset into a training and a validation subset without rotation. The RandomHoldoutSplit can be used as an argument for cv parameters in scikit-learn's GridSearchCV etc. The term \"random\" in RandomHoldoutSplit comes from the fact that the split is specified by the random_seed rather than specifying the training and validation set indices manually as in the PredefinedHoldoutSplit class in mlxtend. Example 1 -- Iterating Over a RandomHoldoutSplit from mlxtend.evaluate import RandomHoldoutSplit from mlxtend.data import iris_data X, y = iris_data() h_iter = RandomHoldoutSplit(valid_size=0.3, random_seed=123) cnt = 0 for train_ind, valid_ind in h_iter.split(X, y): cnt += 1 print(cnt) 1 print(train_ind[:5]) print(valid_ind[:5]) [ 60 16 88 130 6] [ 72 125 80 86 117] Example 2 -- RandomHoldoutSplit in GridSearch from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KNeighborsClassifier from mlxtend.evaluate import RandomHoldoutSplit from mlxtend.data import iris_data X, y = iris_data() params = {'n_neighbors': [1, 2, 3, 4, 5]} grid = GridSearchCV(KNeighborsClassifier(), param_grid=params, cv=RandomHoldoutSplit(valid_size=0.3, random_seed=123)) grid.fit(X, y) assert grid.n_splits_ == 1 print(grid.grid_scores_) [mean: 0.95556, std: 0.00000, params: {'n_neighbors': 1}, mean: 0.95556, std: 0.00000, params: {'n_neighbors': 2}, mean: 0.95556, std: 0.00000, params: {'n_neighbors': 3}, mean: 0.95556, std: 0.00000, params: {'n_neighbors': 4}, mean: 0.95556, std: 0.00000, params: {'n_neighbors': 5}] /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/model_selection/_search.py:762: DeprecationWarning: The grid_scores_ attribute was deprecated in version 0.18 in favor of the more elaborate cv_results_ attribute. The grid_scores_ attribute will not be available from 0.20 DeprecationWarning) API RandomHoldoutSplit(valid_size=0.5, random_seed=None, stratify=False) Train/Validation set splitter for sklearn's GridSearchCV etc. Provides train/validation set indices to split a dataset into train/validation sets using random indices. Parameters valid_size : float (default: 0.5) Proportion of examples that are assigned as validation examples. 1- valid_size will then automatically be assigned as training set examples. random_seed : int (default: None) The random seed for splitting the data into training and validation set partitions. stratify : bool (default: False) True or False, whether to perform a stratified split or not Methods get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set.
Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of training examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.","title":"RandomHoldoutSplit"},{"location":"user_guide/evaluate/RandomHoldoutSplit/#randomholdoutsplit","text":"Randomly split a dataset into a train and validation subset for validation. from mlxtend.evaluate import RandomHoldoutSplit","title":"RandomHoldoutSplit"},{"location":"user_guide/evaluate/RandomHoldoutSplit/#overview","text":"The RandomHoldoutSplit class serves as an alternative to scikit-learn's KFold class, where the RandomHoldoutSplit class splits a dataset into training and a validation subsets without rotation. The RandomHoldoutSplit can be used as argument for cv parameters in scikit-learn's GridSearchCV etc. The term \"random\" in RandomHoldoutSplit comes from the fact that the split is specified by the random_seed rather than specifying the training and validation set indices manually as in the PredefinedHoldoutSplit class in mlxtend.","title":"Overview"},{"location":"user_guide/evaluate/RandomHoldoutSplit/#example-1-iterating-over-a-randomholdoutsplit","text":"from mlxtend.evaluate import RandomHoldoutSplit from mlxtend.data import iris_data X, y = iris_data() h_iter = RandomHoldoutSplit(valid_size=0.3, random_seed=123) cnt = 0 for train_ind, valid_ind in h_iter.split(X, y): cnt += 1 print(cnt) 1 print(train_ind[:5]) print(valid_ind[:5]) [ 60 16 88 130 6] [ 72 125 80 86 117]","title":"Example 1 -- Iterating Over a RandomHoldoutSplit"},{"location":"user_guide/evaluate/RandomHoldoutSplit/#example-2-randomholdoutsplit-in-gridsearch","text":"from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KNeighborsClassifier from mlxtend.evaluate import RandomHoldoutSplit from mlxtend.data import iris_data X, y = iris_data() params = {'n_neighbors': [1, 2, 3, 4, 5]} grid = GridSearchCV(KNeighborsClassifier(), param_grid=params, cv=RandomHoldoutSplit(valid_size=0.3, random_seed=123)) grid.fit(X, y) assert grid.n_splits_ == 1 print(grid.grid_scores_) [mean: 0.95556, std: 0.00000, params: {'n_neighbors': 1}, mean: 0.95556, std: 0.00000, params: {'n_neighbors': 2}, mean: 0.95556, std: 0.00000, params: {'n_neighbors': 3}, mean: 0.95556, std: 0.00000, params: {'n_neighbors': 4}, mean: 0.95556, std: 0.00000, params: {'n_neighbors': 5}] /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/model_selection/_search.py:762: DeprecationWarning: The grid_scores_ attribute was deprecated in version 0.18 in favor of the more elaborate cv_results_ attribute. The grid_scores_ attribute will not be available from 0.20 DeprecationWarning)","title":"Example 2 -- RandomHoldoutSplit in GridSearch"},{"location":"user_guide/evaluate/RandomHoldoutSplit/#api","text":"RandomHoldoutSplit(valid_size=0.5, random_seed=None, stratify=False) Train/Validation set splitter for sklearn's GridSearchCV etc. Provides train/validation set indices to split a dataset into train/validation sets using random indices. Parameters valid_size : float (default: 0.5) Proportion of examples that being assigned as validation examples. 
1 - valid_size will then automatically be assigned as training set examples. random_seed : int (default: None) The random seed for splitting the data into training and validation set partitions. stratify : bool (default: False) Whether to perform a stratified split or not","title":"API"},{"location":"user_guide/evaluate/RandomHoldoutSplit/#methods","text":"get_n_splits(X=None, y=None, groups=None) Returns the number of splitting iterations in the cross-validator Parameters X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns n_splits : 1 Returns the number of splitting iterations in the cross-validator. Always returns 1. split(X, y, groups=None) Generate indices to split data into training and test set. Parameters X : array-like, shape (num_examples, num_features) Training data, where num_examples is the number of training examples and num_features is the number of features. y : array-like, shape (num_examples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields train_index : ndarray The training set indices for that split. valid_index : ndarray The validation set indices for that split.","title":"Methods"},{"location":"user_guide/evaluate/bootstrap/","text":"Bootstrap An implementation of the ordinary nonparametric bootstrap to bootstrap a single statistic (for example, the mean, median, R^2 of a regression fit, and so forth). from mlxtend.evaluate import bootstrap Overview The bootstrap offers an easy and effective way to estimate the distribution of a statistic via simulation, by drawing (or generating) new samples from an existing sample with replacement. Note that the bootstrap does not require making any assumptions about the sample statistic or dataset being normally distributed. Using the bootstrap, we can estimate sample statistics and compute the standard error of the mean and confidence intervals as if we had drawn a number of samples from an infinite population. In a nutshell, the bootstrap procedure can be described as follows: Draw a sample with replacement Compute the sample statistic Repeat steps 1-2 n times Compute the standard deviation (standard error of the mean of the statistic) Compute the confidence interval Or, in simple terms, we can interpret the bootstrap as a means of drawing a potentially endless number of (new) samples from a population by resampling the original dataset. Note that the term \"bootstrap replicate\" is being used quite loosely in the current literature; many researchers and practitioners use it to denote the number of bootstrap samples we draw from the original dataset. However, in the context of this documentation and the code annotation, we use the original definition of bootstrap replicates and use it to refer to the statistic computed from a bootstrap sample. References [1] Efron, Bradley, and Robert J. Tibshirani. An introduction to the bootstrap. CRC press, 1994. Example 1 -- Bootstrapping the Mean This simple example illustrates how you could bootstrap the mean of a sample.
import numpy as np from mlxtend.evaluate import bootstrap rng = np.random.RandomState(123) x = rng.normal(loc=5., size=100) original, std_err, ci_bounds = bootstrap(x, num_rounds=1000, func=np.mean, ci=0.95, seed=123) print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, std_err, ci_bounds[0], ci_bounds[1])) Mean: 5.03, SE: +/- 0.11, CI95: [4.80, 5.26] Example 2 - Bootstrapping a Regression Fit This example illustrates how you can bootstrap the R^2 of a regression fit on the training data. from mlxtend.data import autompg_data from sklearn.linear_model import LinearRegression from sklearn.metrics import r2_score X, y = autompg_data() lr = LinearRegression() def r2_fit(X, model=lr): x, y = X[:, 0].reshape(-1, 1), X[:, 1] pred = lr.fit(x, y).predict(x) return r2_score(y, pred) original, std_err, ci_bounds = bootstrap(X, num_rounds=1000, func=r2_fit, ci=0.95, seed=123) print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, std_err, ci_bounds[0], ci_bounds[1])) Mean: 0.90, SE: +/- 0.01, CI95: [0.89, 0.92] API bootstrap(x, func, num_rounds=1000, ci=0.95, ddof=1, seed=None) Implements the ordinary nonparametric bootstrap Parameters x : NumPy array, shape=(n_samples, [n_columns]) A one- or multi-dimensional array of data records func : A function which computes a statistic that is used to compute the bootstrap replicates (the statistic computed from the bootstrap samples). This function must return a scalar value. For example, np.mean or np.median would be an acceptable argument for func if x is a 1-dimensional array or vector. num_rounds : int (default=1000) The number of bootstrap samples to draw, where each bootstrap sample has the same number of records as the original dataset. ci : float (default=0.95) A value in the range (0, 1) that represents the confidence level for computing the confidence interval. For example, ci=0.95 (default) will compute the 95% confidence interval from the bootstrap replicates. ddof : int The delta degrees of freedom used when computing the standard error. seed : int or None (default=None) Random seed for generating bootstrap samples. Returns original, standard_error, (lower_ci, upper_ci) : tuple Returns the statistic of the original sample ( original ), the standard error of the estimate, and the respective confidence interval bounds. Examples >>> from mlxtend.evaluate import bootstrap >>> rng = np.random.RandomState(123) >>> x = rng.normal(loc=5., size=100) >>> original, std_err, ci_bounds = bootstrap(x, ... num_rounds=1000, ... func=np.mean, ... ci=0.95, ... seed=123) >>> print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, ... std_err, ... ci_bounds[0], ... ci_bounds[1])) Mean: 5.03, SE: +/- 0.11, CI95: [4.80, 5.26] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap/","title":"Bootstrap"},{"location":"user_guide/evaluate/bootstrap/#bootstrap","text":"An implementation of the ordinary nonparametric bootstrap to bootstrap a single statistic (for example, the mean, median, R^2 of a regression fit, and so forth). from mlxtend.evaluate import bootstrap","title":"Bootstrap"},{"location":"user_guide/evaluate/bootstrap/#overview","text":"The bootstrap offers an easy and effective way to estimate the distribution of a statistic via simulation, by drawing (or generating) new samples from an existing sample with replacement. Note that the bootstrap does not require making any assumptions about the sample statistic or dataset being normally distributed.
Using the bootstrap, we can estimate sample statistics and compute the standard error of the mean and confidence intervals as if we have drawn a number of samples from an infinite population. In a nutshell, the bootstrap procedure can be described as follows: Draw a sample with replacement Compute the sample statistic Repeat step 1-2 n times Compute the standard deviation (standard error of the mean of the statistic) Compute the confidence interval Or, in simple terms, we can interpret the bootstrap a means of drawing a potentially endless number of (new) samples from a population by resampling the original dataset. Note that the term \"bootstrap replicate\" is being used quite loosely in current literature; many researchers and practitioners use it to define the number of bootstrap samples we draw from the original dataset. However, in the context of this documentation and the code annotation, we use the original definition of bootstrap repliactes and use it to refer to the statistic computed from a bootstrap sample.","title":"Overview"},{"location":"user_guide/evaluate/bootstrap/#references","text":"[1] Efron, Bradley, and Robert J. Tibshirani. An introduction to the bootstrap. CRC press, 1994. Management of Data (ACM SIGMOD '97), pages 265-276, 1997.","title":"References"},{"location":"user_guide/evaluate/bootstrap/#example-1-bootstrapping-the-mean","text":"This simple example illustrates how you could bootstrap the mean of a sample. import numpy as np from mlxtend.evaluate import bootstrap rng = np.random.RandomState(123) x = rng.normal(loc=5., size=100) original, std_err, ci_bounds = bootstrap(x, num_rounds=1000, func=np.mean, ci=0.95, seed=123) print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, std_err, ci_bounds[0], ci_bounds[1])) Mean: 5.03, SE: +/- 0.11, CI95: [4.80, 5.26]","title":"Example 1 -- Bootstrapping the Mean"},{"location":"user_guide/evaluate/bootstrap/#example-2-bootstrapping-a-regression-fit","text":"This example illustrates how you can bootstrap the R^2 of a regression fit on the training data. from mlxtend.data import autompg_data from sklearn.linear_model import LinearRegression from sklearn.metrics import r2_score X, y = autompg_data() lr = LinearRegression() def r2_fit(X, model=lr): x, y = X[:, 0].reshape(-1, 1), X[:, 1] pred = lr.fit(x, y).predict(x) return r2_score(y, pred) original, std_err, ci_bounds = bootstrap(X, num_rounds=1000, func=r2_fit, ci=0.95, seed=123) print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, std_err, ci_bounds[0], ci_bounds[1])) Mean: 0.90, SE: +/- 0.01, CI95: [0.89, 0.92]","title":"Example 2 - Bootstrapping a Regression Fit"},{"location":"user_guide/evaluate/bootstrap/#api","text":"bootstrap(x, func, num_rounds=1000, ci=0.95, ddof=1, seed=None) Implements the ordinary nonparametric bootstrap Parameters x : NumPy array, shape=(n_samples, [n_columns]) An one or multidimensional array of data records func : A function which computes a statistic that is used to compute the bootstrap replicates (the statistic computed from the bootstrap samples). This function must return a scalar value. For example, np.mean or np.median would be an acceptable argument for func if x is a 1-dimensional array or vector. num_rounds : int (default=1000) The number of bootstrap samnples to draw where each bootstrap sample has the same number of records as the original dataset. ci : int (default=0.95) An integer in the range (0, 1) that represents the confidence level for computing the confidence interval. 
For example, ci=0.95 (default) will compute the 95% confidence interval from the bootstrap replicates. ddof : int The delta degrees of freedom used when computing the standard error. seed : int or None (default=None) Random seed for generating bootstrap samples. Returns original, standard_error, (lower_ci, upper_ci) : tuple Returns the statistic of the original sample ( original ), the standard error of the estimate, and the respective confidence interval bounds. Examples >>> from mlxtend.evaluate import bootstrap >>> rng = np.random.RandomState(123) >>> x = rng.normal(loc=5., size=100) >>> original, std_err, ci_bounds = bootstrap(x, ... num_rounds=1000, ... func=np.mean, ... ci=0.95, ... seed=123) >>> print('Mean: %.2f, SE: +/- %.2f, CI95: [%.2f, %.2f]' % (original, ... std_err, ... ci_bounds[0], ... ci_bounds[1])) Mean: 5.03, SE: +/- 0.11, CI95: [4.80, 5.26] >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap/","title":"API"},{"location":"user_guide/evaluate/bootstrap_point632_score/","text":"bootstrap_point632_score An implementation of the .632 bootstrap to evaluate supervised learning algorithms. from mlxtend.evaluate import bootstrap_point632_score Overview Originally, the bootstrap method aimed to determine the statistical properties of an estimator when the underlying distribution is unknown and additional samples are not available. Now, in order to exploit this method for the evaluation of predictive models, such as hypotheses for classification and regression, we may prefer a slightly different approach to bootstrapping using the so-called Out-Of-Bag (OOB) or Leave-One-Out Bootstrap (LOOB) technique. Here, we use out-of-bag samples as test sets for evaluation instead of evaluating the model on the training data. Out-of-bag samples are the unique sets of instances that are not used for model fitting, as shown in the figure below [1]. The figure above illustrates what three random bootstrap samples drawn from an exemplary ten-sample dataset ( X_1,X_2, ..., X_{10} ) and their out-of-bag samples for testing may look like. In practice, Bradley Efron and Robert Tibshirani recommend drawing 50 to 200 bootstrap samples as being sufficient for reliable estimates [2]. .632 Bootstrap In 1983, Bradley Efron described the .632 Estimate , a further improvement to address the pessimistic bias of the bootstrap cross-validation approach described above [3]. The pessimistic bias in the \"classic\" bootstrap method can be attributed to the fact that the bootstrap samples only contain approximately 63.2% of the unique samples from the original dataset. For instance, we can compute the probability that a given sample from a dataset of size n is not drawn as a bootstrap sample as P (\\text{not chosen}) = \\bigg(1 - \\frac{1}{n}\\bigg)^n, which is asymptotically equivalent to \\frac{1}{e} \\approx 0.368 as n \\rightarrow \\infty. Vice versa, we can then compute the probability that a sample is chosen as P (\\text{chosen}) = 1 - \\bigg(1 - \\frac{1}{n}\\bigg)^n \\approx 0.632 for reasonably large datasets, so that we'd select approximately 0.632 \\times n unique samples as bootstrap training sets and reserve 0.368 \\times n out-of-bag samples for testing in each iteration.
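As a quick, hedged sanity check (not part of the original mlxtend documentation), the 0.632/0.368 split between unique bootstrap-sample examples and out-of-bag examples can be verified numerically with a few lines of NumPy:

import numpy as np

# Draw one bootstrap sample (indices with replacement) from a dataset of size n
rng = np.random.RandomState(0)
n = 10000
boot_indices = rng.randint(0, n, size=n)

# Fraction of the dataset that ended up in the bootstrap sample (~0.632)
print(np.unique(boot_indices).size / n)

# Analytic counterpart: 1 - (1 - 1/n)^n, which approaches 1 - 1/e for large n
print(1 - (1 - 1 / n) ** n)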
Now, to address the bias that is due to this sampling with replacement, Bradley Efron proposed the .632 Estimate that we mentioned earlier, which is computed via the following equation: \\text{ACC}_{boot} = \\frac{1}{b} \\sum_{i=1}^b \\big(0.632 \\cdot \\text{ACC}_{h, i} + 0.368 \\cdot \\text{ACC}_{r, i}\\big), where \\text{ACC}_{r, i} is the resubstitution accuracy, and \\text{ACC}_{h, i} is the accuracy on the out-of-bag sample. (A short numeric sketch of this weighting step is shown after the reference list below.) .632+ Bootstrap Now, while the .632 Bootstrap attempts to address the pessimistic bias of the estimate, an optimistic bias may occur with models that tend to overfit, so Bradley Efron and Robert Tibshirani proposed the .632+ Bootstrap Method (Efron and Tibshirani, 1997). Instead of using a fixed \"weight\" \\omega = 0.632 in ACC_{\\text{boot}} = \\frac{1}{b} \\sum_{i=1}^b \\big(\\omega \\cdot \\text{ACC}_{h, i} + (1-\\omega) \\cdot \\text{ACC}_{r, i} \\big), we compute the weight \\omega as \\omega = \\frac{0.632}{1 - 0.368 \\times R}, where R is the relative overfitting rate R = \\frac{(-1) \\times (\\text{ACC}_{h, i} - \\text{ACC}_{r, i})}{\\gamma - (1 -\\text{ACC}_{h, i})}. (Since we are plugging \\omega into the equation for computing ACC_{boot} that we defined above, \\text{ACC}_{h, i} and \\text{ACC}_{r, i} still refer to the out-of-bag and resubstitution accuracy estimates in the i th bootstrap round, respectively.) Further, we need to determine the no-information rate \\gamma in order to compute R . For instance, we can compute \\gamma by fitting a model to a dataset that contains all possible combinations between samples x_{i'} and target class labels y_{i} \u2014 we pretend that the observations and class labels are independent: \\gamma = \\frac{1}{n^2} \\sum_{i=1}^{n} \\sum_{i '=1}^{n} L(y_{i}, f(x_{i '})). Alternatively, we can estimate the no-information rate \\gamma as follows: \\gamma = \\sum_{k=1}^K p_k (1 - q_k), where p_k is the proportion of class k samples observed in the dataset, and q_k is the proportion of class k samples that the classifier predicts in the dataset. References [1] https://sebastianraschka.com/blog/2016/model-evaluation-selection-part2.html [2] Efron, Bradley, and Robert J. Tibshirani. An introduction to the bootstrap. CRC press, 1994. [3] Efron, Bradley. 1983. \u201cEstimating the Error Rate of a Prediction Rule: Improvement on Cross-Validation.\u201d Journal of the American Statistical Association 78 (382): 316. doi:10.2307/2288636. [4] Efron, Bradley, and Robert Tibshirani. 1997. \u201cImprovements on Cross-Validation: The .632+ Bootstrap Method.\u201d Journal of the American Statistical Association 92 (438): 548. doi:10.2307/2965703.
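To make the .632 weighting formula above concrete, here is a minimal sketch assuming hypothetical per-round accuracy arrays acc_oob and acc_resub (these names are purely illustrative and not part of the mlxtend API; bootstrap_point632_score performs this step internally):

import numpy as np

# Hypothetical out-of-bag (ACC_h,i) and resubstitution (ACC_r,i) accuracies
# for b = 3 bootstrap rounds
acc_oob = np.array([0.92, 0.94, 0.91])
acc_resub = np.array([1.00, 0.99, 1.00])

# .632 estimate: weighted combination, averaged over the b rounds
acc_632 = np.mean(0.632 * acc_oob + 0.368 * acc_resub)
print(acc_632)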
Example 1 -- Evaluating the predictive performance of a model via the classic out-of-bag Bootstrap The bootstrap_point632_score function mimics the behavior of scikit-learn's `cross_val_score, and a typically usage example is shown below: from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from mlxtend.evaluate import bootstrap_point632_score import numpy as np iris = datasets.load_iris() X = iris.data y = iris.target tree = DecisionTreeClassifier(random_state=0) # Model accuracy scores = bootstrap_point632_score(tree, X, y, method='oob') acc = np.mean(scores) print('Accuracy: %.2f%%' % (100*acc)) # Confidence interval lower = np.percentile(scores, 2.5) upper = np.percentile(scores, 97.5) print('95%% Confidence interval: [%.2f, %.2f]' % (100*lower, 100*upper)) Accuracy: 94.52% 95% Confidence interval: [88.88, 98.28] Example 2 -- Evaluating the predictive performance of a model via the .632 Bootstrap from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from mlxtend.evaluate import bootstrap_point632_score import numpy as np iris = datasets.load_iris() X = iris.data y = iris.target tree = DecisionTreeClassifier(random_state=0) # Model accuracy scores = bootstrap_point632_score(tree, X, y) acc = np.mean(scores) print('Accuracy: %.2f%%' % (100*acc)) # Confidence interval lower = np.percentile(scores, 2.5) upper = np.percentile(scores, 97.5) print('95%% Confidence interval: [%.2f, %.2f]' % (100*lower, 100*upper)) Accuracy: 96.58% 95% Confidence interval: [92.37, 98.97] Example 3 -- Evaluating the predictive performance of a model via the .632+ Bootstrap from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from mlxtend.evaluate import bootstrap_point632_score import numpy as np iris = datasets.load_iris() X = iris.data y = iris.target tree = DecisionTreeClassifier(random_state=0) # Model accuracy scores = bootstrap_point632_score(tree, X, y, method='.632+') acc = np.mean(scores) print('Accuracy: %.2f%%' % (100*acc)) # Confidence interval lower = np.percentile(scores, 2.5) upper = np.percentile(scores, 97.5) print('95%% Confidence interval: [%.2f, %.2f]' % (100*lower, 100*upper)) Accuracy: 96.40% 95% Confidence interval: [92.34, 99.00] API bootstrap_point632_score(estimator, X, y, n_splits=200, method='.632', scoring_func=None, random_seed=None, clone_estimator=True) Implementation of the .632 [1] and .632+ [2] bootstrap for supervised learning References: [1] Efron, Bradley. 1983. \u201cEstimating the Error Rate of a Prediction Rule: Improvement on Cross-Validation.\u201d Journal of the American Statistical Association 78 (382): 316. doi:10.2307/2288636. [2] Efron, Bradley, and Robert Tibshirani. 1997. \u201cImprovements on Cross-Validation: The .632+ Bootstrap Method.\u201d Journal of the American Statistical Association 92 (438): 548. doi:10.2307/2965703. Parameters estimator : object An estimator for classification or regression that follows the scikit-learn API and implements \"fit\" and \"predict\" methods. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. method : str (default='.632') The bootstrap method, which can be either - 1) '.632' bootstrap (default) - 2) '.632+' bootstrap - 3) 'oob' (regular out-of-bag, no weighting) for comparison studies. 
scoring_func : callable, Score function (or loss function) with signature scoring_func(y, y_pred, **kwargs) . If none, uses classification accuracy if the estimator is a classifier and mean squared error if the estimator is a regressor. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. clone_estimator : bool (default=True) Clones the estimator if true, otherwise fits the original. Returns scores : array of float, shape=(len(list(n_splits)),) Array of scores of the estimator for each bootstrap replicate. Examples >>> from sklearn import datasets, linear_model >>> from mlxtend.evaluate import bootstrap_point632_score >>> iris = datasets.load_iris() >>> X = iris.data >>> y = iris.target >>> lr = linear_model.LogisticRegression() >>> scores = bootstrap_point632_score(lr, X, y) >>> acc = np.mean(scores) >>> print('Accuracy:', acc) 0.953023146884 >>> lower = np.percentile(scores, 2.5) >>> upper = np.percentile(scores, 97.5) >>> print('95%% Confidence interval: [%.2f, %.2f]' % (lower, upper)) 95% Confidence interval: [0.90, 0.98] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap_point632_score/","title":"bootstrap_point632_score"},{"location":"user_guide/evaluate/bootstrap_point632_score/#bootstrap_point632_score","text":"An implementation of the .632 bootstrap to evaluate supervised learning algorithms. from mlxtend.evaluate import bootstrap_point632_score","title":"bootstrap_point632_score"},{"location":"user_guide/evaluate/bootstrap_point632_score/#overview","text":"Originally, the bootstrap method aims to determine the statistical properties of an estimator when the underlying distribution was unknown and additional samples are not available. Now, in order to exploit this method for the evaluation of predictive models, such as hypotheses for classification and regression, we may prefer a slightly different approach to bootstrapping using the so-called Out-Of-Bag (OOB) or Leave-One-Out Bootstrap (LOOB) technique. Here, we use out-of-bag samples as test sets for evaluation instead of evaluating the model on the training data. Out-of-bag samples are the unique sets of instances that are not used for model fitting as shown in the figure below [1]. The figure above illustrates how three random bootstrap samples drawn from an exemplary ten-sample dataset ( X_1,X_2, ..., X_{10} ) and their out-of-bag sample for testing may look like. In practice, Bradley Efron and Robert Tibshirani recommend drawing 50 to 200 bootstrap samples as being sufficient for reliable estimates [2].","title":"Overview"},{"location":"user_guide/evaluate/bootstrap_point632_score/#632-bootstrap","text":"In 1983, Bradley Efron described the .632 Estimate , a further improvement to address the pessimistic bias of the bootstrap cross-validation approach described above [3]. The pessimistic bias in the \"classic\" bootstrap method can be attributed to the fact that the bootstrap samples only contain approximately 63.2% of the unique samples from the original dataset. For instance, we can compute the probability that a given sample from a dataset of size n is not drawn as a bootstrap sample as P (\\text{not chosen}) = \\bigg(1 - \\frac{1}{n}\\bigg)^n, which is asymptotically equivalent to \\frac{1}{e} \\approx 0.368 as n \\rightarrow \\infty. 
Vice versa, we can then compute the probability that a sample is chosen as P (\\text{chosen}) = 1 - \\bigg(1 - \\frac{1}{n}\\bigg)^n \\approx 0.632 for reasonably large datasets, so that we'd select approximately 0.632 \\times n uniques samples as bootstrap training sets and reserve 0.368 \\times n out-of-bag samples for testing in each iteration. Now, to address the bias that is due to this the sampling with replacement, Bradley Efron proposed the .632 Estimate that we mentioned earlier, which is computed via the following equation: \\text{ACC}_{boot} = \\frac{1}{b} \\sum_{i=1}^b \\big(0.632 \\cdot \\text{ACC}_{h, i} + 0.368 \\cdot \\text{ACC}_{r, i}\\big), where \\text{ACC}_{r, i} is the resubstitution accuracy, and \\text{ACC}_{h, i} is the accuracy on the out-of-bag sample.","title":".632 Bootstrap"},{"location":"user_guide/evaluate/bootstrap_point632_score/#632-bootstrap_1","text":"Now, while the .632 Boostrap attempts to address the pessimistic bias of the estimate, an optimistic bias may occur with models that tend to overfit so that Bradley Efron and Robert Tibshirani proposed the The .632+ Bootstrap Method (Efron and Tibshirani, 1997). Instead of using a fixed \"weight\" \\omega = 0.632 in ACC_{\\text{boot}} = \\frac{1}{b} \\sum_{i=1}^b \\big(\\omega \\cdot \\text{ACC}_{h, i} + (1-\\omega) \\cdot \\text{ACC}_{r, i} \\big), we compute the weight \\gamma as \\omega = \\frac{0.632}{1 - 0.368 \\times R}, where R is the relative overfitting rate R = \\frac{(-1) \\times (\\text{ACC}_{h, i} - \\text{ACC}_{r, i})}{\\gamma - (1 -\\text{ACC}_{h, i})}. (Since we are plugging \\omega into the equation for computing ACC_{boot} that we defined above, \\text{ACC}_{h, i} and \\text{ACC}_{r, i} still refer to the resubstitution and out-of-bag accuracy estimates in the i th bootstrap round, respectively.) Further, we need to determine the no-information rate \\gamma in order to compute R . For instance, we can compute \\gamma by fitting a model to a dataset that contains all possible combinations between samples x_{i'} and target class labels y_{i} \u2014 we pretend that the observations and class labels are independent: \\gamma = \\frac{1}{n^2} \\sum_{i=1}^{n} \\sum_{i '=1}^{n} L(y_{i}, f(x_{i '})). Alternatively, we can estimate the no-information rate \\gamma as follows: \\gamma = \\sum_{k=1}^K p_k (1 - q_k), where p_k is the proportion of class k samples observed in the dataset, and q_k is the proportion of class k samples that the classifier predicts in the dataset.","title":".632+ Bootstrap"},{"location":"user_guide/evaluate/bootstrap_point632_score/#references","text":"[1] https://sebastianraschka.com/blog/2016/model-evaluation-selection-part2.html [2] Efron, Bradley, and Robert J. Tibshirani. An introduction to the bootstrap. CRC press, 1994. Management of Data (ACM SIGMOD '97), pages 265-276, 1997. [3] Efron, Bradley. 1983. \u201cEstimating the Error Rate of a Prediction Rule: Improvement on Cross-Validation.\u201d Journal of the American Statistical Association 78 (382): 316. doi:10.2307/2288636. [4] Efron, Bradley, and Robert Tibshirani. 1997. \u201cImprovements on Cross-Validation: The .632+ Bootstrap Method.\u201d Journal of the American Statistical Association 92 (438): 548. 
doi:10.2307/2965703.","title":"References"},{"location":"user_guide/evaluate/bootstrap_point632_score/#example-1-evaluating-the-predictive-performance-of-a-model-via-the-classic-out-of-bag-bootstrap","text":"The bootstrap_point632_score function mimics the behavior of scikit-learn's `cross_val_score, and a typically usage example is shown below: from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from mlxtend.evaluate import bootstrap_point632_score import numpy as np iris = datasets.load_iris() X = iris.data y = iris.target tree = DecisionTreeClassifier(random_state=0) # Model accuracy scores = bootstrap_point632_score(tree, X, y, method='oob') acc = np.mean(scores) print('Accuracy: %.2f%%' % (100*acc)) # Confidence interval lower = np.percentile(scores, 2.5) upper = np.percentile(scores, 97.5) print('95%% Confidence interval: [%.2f, %.2f]' % (100*lower, 100*upper)) Accuracy: 94.52% 95% Confidence interval: [88.88, 98.28]","title":"Example 1 -- Evaluating the predictive performance of a model via the classic out-of-bag Bootstrap"},{"location":"user_guide/evaluate/bootstrap_point632_score/#example-2-evaluating-the-predictive-performance-of-a-model-via-the-632-bootstrap","text":"from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from mlxtend.evaluate import bootstrap_point632_score import numpy as np iris = datasets.load_iris() X = iris.data y = iris.target tree = DecisionTreeClassifier(random_state=0) # Model accuracy scores = bootstrap_point632_score(tree, X, y) acc = np.mean(scores) print('Accuracy: %.2f%%' % (100*acc)) # Confidence interval lower = np.percentile(scores, 2.5) upper = np.percentile(scores, 97.5) print('95%% Confidence interval: [%.2f, %.2f]' % (100*lower, 100*upper)) Accuracy: 96.58% 95% Confidence interval: [92.37, 98.97]","title":"Example 2 -- Evaluating the predictive performance of a model via the .632 Bootstrap"},{"location":"user_guide/evaluate/bootstrap_point632_score/#example-3-evaluating-the-predictive-performance-of-a-model-via-the-632-bootstrap","text":"from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from mlxtend.evaluate import bootstrap_point632_score import numpy as np iris = datasets.load_iris() X = iris.data y = iris.target tree = DecisionTreeClassifier(random_state=0) # Model accuracy scores = bootstrap_point632_score(tree, X, y, method='.632+') acc = np.mean(scores) print('Accuracy: %.2f%%' % (100*acc)) # Confidence interval lower = np.percentile(scores, 2.5) upper = np.percentile(scores, 97.5) print('95%% Confidence interval: [%.2f, %.2f]' % (100*lower, 100*upper)) Accuracy: 96.40% 95% Confidence interval: [92.34, 99.00]","title":"Example 3 -- Evaluating the predictive performance of a model via the .632+ Bootstrap"},{"location":"user_guide/evaluate/bootstrap_point632_score/#api","text":"bootstrap_point632_score(estimator, X, y, n_splits=200, method='.632', scoring_func=None, random_seed=None, clone_estimator=True) Implementation of the .632 [1] and .632+ [2] bootstrap for supervised learning References: [1] Efron, Bradley. 1983. \u201cEstimating the Error Rate of a Prediction Rule: Improvement on Cross-Validation.\u201d Journal of the American Statistical Association 78 (382): 316. doi:10.2307/2288636. [2] Efron, Bradley, and Robert Tibshirani. 1997. \u201cImprovements on Cross-Validation: The .632+ Bootstrap Method.\u201d Journal of the American Statistical Association 92 (438): 548. doi:10.2307/2965703. 
Parameters estimator : object An estimator for classification or regression that follows the scikit-learn API and implements \"fit\" and \"predict\" methods. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. n_splits : int (default=200) Number of bootstrap iterations. Must be larger than 1. method : str (default='.632') The bootstrap method, which can be either - 1) '.632' bootstrap (default) - 2) '.632+' bootstrap - 3) 'oob' (regular out-of-bag, no weighting) for comparison studies. scoring_func : callable, Score function (or loss function) with signature scoring_func(y, y_pred, **kwargs) . If none, uses classification accuracy if the estimator is a classifier and mean squared error if the estimator is a regressor. random_seed : int (default=None) If int, random_seed is the seed used by the random number generator. clone_estimator : bool (default=True) Clones the estimator if true, otherwise fits the original. Returns scores : array of float, shape=(len(list(n_splits)),) Array of scores of the estimator for each bootstrap replicate. Examples >>> from sklearn import datasets, linear_model >>> from mlxtend.evaluate import bootstrap_point632_score >>> iris = datasets.load_iris() >>> X = iris.data >>> y = iris.target >>> lr = linear_model.LogisticRegression() >>> scores = bootstrap_point632_score(lr, X, y) >>> acc = np.mean(scores) >>> print('Accuracy:', acc) 0.953023146884 >>> lower = np.percentile(scores, 2.5) >>> upper = np.percentile(scores, 97.5) >>> print('95%% Confidence interval: [%.2f, %.2f]' % (lower, upper)) 95% Confidence interval: [0.90, 0.98] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap_point632_score/","title":"API"},{"location":"user_guide/evaluate/cochrans_q/","text":"Cochran's Q Test Cochran's Q test for comparing the performance of multiple classifiers. from mlxtend.evaluate import cochrans_q Overview Cochran's Q test can be regarded as a generalized version of McNemar's test that can be applied to evaluate multiple classifiers. In a sense, Cochran's Q test is analogous to ANOVA for binary outcomes. To compare more than two classifiers, we can use Cochran's Q test, which has a test statistic Q that is approximately, (similar to McNemar's test), distributed as chi-squared with L-1 degrees of freedom, where L is the number of models we evaluate (since L=2 for McNemar's test, McNemars test statistic approximates a chi-squared distribution with one degree of freedom). More formally, Cochran's Q test tests the hypothesis that there is no difference between the classification accuracies [1]: p_i: H_0 = p_1 = p_2 = \\cdots = p_L. Let \\{D_1, \\dots , D_L\\} be a set of classifiers who have all been tested on the same dataset. If the L classifiers don't perform differently, then the following Q statistic is distributed approximately as \"chi-squared\" with L-1 degrees of freedom: Q_C = (L-1) \\frac{L \\sum^{L}_{i=1}G_{i}^{2} - T^2}{LT - \\sum^{N_{ts}}_{j=1} (L_j)^2}. Here, G_i is the number of objects out of N_{ts} correctly classified by D_i= 1, \\dots L ; L_j is the number of classifiers out of L that correctly classified object \\mathbf{z}_j \\in \\mathbf{Z}_{ts} , where \\mathbf{Z}_{ts} = \\{\\mathbf{z}_1, ... 
\\mathbf{z}_{N_{ts}}\\} is the test dataset on which the classifers are tested on; and T is the total number of correct number of votes among the L classifiers [2]: T = \\sum_{i=1}^{L} G_i = \\sum^{N_{ts}}_{j=1} L_j. To perform Cochran's Q test, we typically organize the classificier predictions in a binary N_{ts} \\times L matrix. The ij\\text{th} entry of such matrix is 0 if a classifier D_j has misclassified a data example (vector) \\mathbf{z}_i and 1 otherwise (if the classifier predicted the class label l(\\mathbf{z}_i) correctly) [2]. The following example taken from [2] illustrates how the classification results may be organized. For instance, assume we have the ground truth labels of the test dataset y_true and the following predictions by 3 classifiers ( y_model_1 , y_model_2 , and y_model_3 ): y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_1 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_2 = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_3 = np.array([1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]) The table of correct (1) and incorrect (0) classifications may then look as follows: D_1 (model 1) D_2 (model 2) D_3 (model 3) Occurrences 1 1 1 80 1 1 0 2 1 0 1 0 1 0 0 2 0 1 1 9 0 1 0 1 0 0 1 3 0 0 0 3 Accuracy 84/100*100% = 84% 92/100*100% = 92% 92/100*100% = 92% By plugging in the respective value into the previous equation, we obtain the following Q value [2]: Q_c = 2 \\times \\frac{3 \\times (84^2 + 92^2 + 92^2) - 268^2}{3\\times 268-(80 \\times 9 + 11 \\times 4 + 6 \\times 1)} \\approx 7.5294. (Note that the Q value in [2] is listed as 3.7647 due to a typo as discussed with the author, the value 7.5294 is the correct one.) Now, the Q value (approximating \\chi^2 ) corresponds to a p-value of approx. 0.023 assuming a \\chi^2 distribution with L-1 = 2 degrees of freedom. Assuming that we chose a significance level of \\alpha=0.05 , we would reject the null hypothesis that all classifiers perform equally well, since 0.023 < \\alpha . In practice, if we successfully rejected the null hypothesis, we could perform multiple post hoc pair-wise tests -- for example, McNemar tests with a Bonferroni correction -- to determine which pairs have different population proportions. References [1] Fleiss, Joseph L., Bruce Levin, and Myunghee Cho Paik. Statistical methods for rates and proportions. John Wiley & Sons, 2013. [2] Kuncheva, Ludmila I. Combining pattern classifiers: methods and algorithms. John Wiley & Sons, 2004. 
Example 1 - Cochran's Q test import numpy as np from mlxtend.evaluate import cochrans_q from mlxtend.evaluate import mcnemar_table from mlxtend.evaluate import mcnemar ## Dataset: # ground truth labels of the test dataset: y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # predictions by 3 classifiers (`y_model_1`, `y_model_2`, and `y_model_3`): y_model_1 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_2 = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_3 = np.array([1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]) Assuming a significance level \\alpha=0.05 , we can conduct Cochran's Q test as follows, to test the null hypothesis there is no difference between the classification accuracies, p_i: H_0 = p_1 = p_2 = \\cdots = p_L : q, p_value = cochrans_q(y_true, y_model_1, y_model_2, y_model_3) print('Q: %.3f' % q) print('p-value: %.3f' % p_value) Q: 7.529 p-value: 0.023 Since the p-value is smaller than \\alpha , we can reject the null hypothesis and conclude that there is a difference between the classification accuracies. As mentioned in the introduction earlier, we could now perform multiple post hoc pair-wise tests -- for example, McNemar tests with a Bonferroni correction -- to determine which pairs have different population proportions. Lastly, let's illustrate that Cochran's Q test is indeed just a generalized version of McNemar's test: chi2, p_value = cochrans_q(y_true, y_model_1, y_model_2) print('Cochran\\'s Q Chi^2: %.3f' % chi2) print('Cochran\\'s Q p-value: %.3f' % p_value) Cochran's Q Chi^2: 5.333 Cochran's Q p-value: 0.021 chi2, p_value = mcnemar(mcnemar_table(y_true, y_model_1, y_model_2), corrected=False) print('McNemar\\'s Chi^2: %.3f' % chi2) print('McNemar\\'s p-value: %.3f' % p_value) McNemar's Chi^2: 5.333 McNemar's p-value: 0.021 API cochrans_q(y_target, y_model_predictions)* Cochran's Q test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array. Returns q, p : float or None, float Returns the Q (chi-squared) value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/cochrans_q/","title":"Cochran's Q Test"},{"location":"user_guide/evaluate/cochrans_q/#cochrans-q-test","text":"Cochran's Q test for comparing the performance of multiple classifiers. 
from mlxtend.evaluate import cochrans_q","title":"Cochran's Q Test"},{"location":"user_guide/evaluate/cochrans_q/#overview","text":"Cochran's Q test can be regarded as a generalized version of McNemar's test that can be applied to evaluate multiple classifiers. In a sense, Cochran's Q test is analogous to ANOVA for binary outcomes. To compare more than two classifiers, we can use Cochran's Q test, which has a test statistic Q that is approximately, (similar to McNemar's test), distributed as chi-squared with L-1 degrees of freedom, where L is the number of models we evaluate (since L=2 for McNemar's test, McNemars test statistic approximates a chi-squared distribution with one degree of freedom). More formally, Cochran's Q test tests the hypothesis that there is no difference between the classification accuracies [1]: p_i: H_0 = p_1 = p_2 = \\cdots = p_L. Let \\{D_1, \\dots , D_L\\} be a set of classifiers who have all been tested on the same dataset. If the L classifiers don't perform differently, then the following Q statistic is distributed approximately as \"chi-squared\" with L-1 degrees of freedom: Q_C = (L-1) \\frac{L \\sum^{L}_{i=1}G_{i}^{2} - T^2}{LT - \\sum^{N_{ts}}_{j=1} (L_j)^2}. Here, G_i is the number of objects out of N_{ts} correctly classified by D_i= 1, \\dots L ; L_j is the number of classifiers out of L that correctly classified object \\mathbf{z}_j \\in \\mathbf{Z}_{ts} , where \\mathbf{Z}_{ts} = \\{\\mathbf{z}_1, ... \\mathbf{z}_{N_{ts}}\\} is the test dataset on which the classifers are tested on; and T is the total number of correct number of votes among the L classifiers [2]: T = \\sum_{i=1}^{L} G_i = \\sum^{N_{ts}}_{j=1} L_j. To perform Cochran's Q test, we typically organize the classificier predictions in a binary N_{ts} \\times L matrix. The ij\\text{th} entry of such matrix is 0 if a classifier D_j has misclassified a data example (vector) \\mathbf{z}_i and 1 otherwise (if the classifier predicted the class label l(\\mathbf{z}_i) correctly) [2]. The following example taken from [2] illustrates how the classification results may be organized. 
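As a hedged aside before the worked numbers: the binary N_{ts} x L correctness matrix described above can be built directly from the label arrays, e.g. with toy data (purely illustrative; cochrans_q itself accepts the raw label arrays and does not require this matrix as input):

import numpy as np

y_true = np.array([0, 1, 0, 1])
model_preds = [np.array([0, 1, 1, 1]),   # hypothetical classifier 1
               np.array([0, 0, 0, 1])]   # hypothetical classifier 2

# Entry (i, j) is 1 if classifier j classified example i correctly, else 0
correctness = np.column_stack([(pred == y_true).astype(int)
                               for pred in model_preds])
print(correctness)

With such a matrix in mind, the worked example from [2] proceeds as follows.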
For instance, assume we have the ground truth labels of the test dataset y_true and the following predictions by 3 classifiers ( y_model_1 , y_model_2 , and y_model_3 ): y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_1 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_2 = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_3 = np.array([1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]) The table of correct (1) and incorrect (0) classifications may then look as follows: D_1 (model 1) D_2 (model 2) D_3 (model 3) Occurrences 1 1 1 80 1 1 0 2 1 0 1 0 1 0 0 2 0 1 1 9 0 1 0 1 0 0 1 3 0 0 0 3 Accuracy 84/100*100% = 84% 92/100*100% = 92% 92/100*100% = 92% By plugging in the respective value into the previous equation, we obtain the following Q value [2]: Q_c = 2 \\times \\frac{3 \\times (84^2 + 92^2 + 92^2) - 268^2}{3\\times 268-(80 \\times 9 + 11 \\times 4 + 6 \\times 1)} \\approx 7.5294. (Note that the Q value in [2] is listed as 3.7647 due to a typo as discussed with the author, the value 7.5294 is the correct one.) Now, the Q value (approximating \\chi^2 ) corresponds to a p-value of approx. 0.023 assuming a \\chi^2 distribution with L-1 = 2 degrees of freedom. Assuming that we chose a significance level of \\alpha=0.05 , we would reject the null hypothesis that all classifiers perform equally well, since 0.023 < \\alpha . In practice, if we successfully rejected the null hypothesis, we could perform multiple post hoc pair-wise tests -- for example, McNemar tests with a Bonferroni correction -- to determine which pairs have different population proportions.","title":"Overview"},{"location":"user_guide/evaluate/cochrans_q/#references","text":"[1] Fleiss, Joseph L., Bruce Levin, and Myunghee Cho Paik. Statistical methods for rates and proportions. John Wiley & Sons, 2013. [2] Kuncheva, Ludmila I. Combining pattern classifiers: methods and algorithms. 
John Wiley & Sons, 2004.","title":"References"},{"location":"user_guide/evaluate/cochrans_q/#example-1-cochrans-q-test","text":"import numpy as np from mlxtend.evaluate import cochrans_q from mlxtend.evaluate import mcnemar_table from mlxtend.evaluate import mcnemar ## Dataset: # ground truth labels of the test dataset: y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # predictions by 3 classifiers (`y_model_1`, `y_model_2`, and `y_model_3`): y_model_1 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_2 = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_3 = np.array([1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]) Assuming a significance level \\alpha=0.05 , we can conduct Cochran's Q test as follows, to test the null hypothesis there is no difference between the classification accuracies, p_i: H_0 = p_1 = p_2 = \\cdots = p_L : q, p_value = cochrans_q(y_true, y_model_1, y_model_2, y_model_3) print('Q: %.3f' % q) print('p-value: %.3f' % p_value) Q: 7.529 p-value: 0.023 Since the p-value is smaller than \\alpha , we can reject the null hypothesis and conclude that there is a difference between the classification accuracies. As mentioned in the introduction earlier, we could now perform multiple post hoc pair-wise tests -- for example, McNemar tests with a Bonferroni correction -- to determine which pairs have different population proportions. Lastly, let's illustrate that Cochran's Q test is indeed just a generalized version of McNemar's test: chi2, p_value = cochrans_q(y_true, y_model_1, y_model_2) print('Cochran\\'s Q Chi^2: %.3f' % chi2) print('Cochran\\'s Q p-value: %.3f' % p_value) Cochran's Q Chi^2: 5.333 Cochran's Q p-value: 0.021 chi2, p_value = mcnemar(mcnemar_table(y_true, y_model_1, y_model_2), corrected=False) print('McNemar\\'s Chi^2: %.3f' % chi2) print('McNemar\\'s p-value: %.3f' % p_value) McNemar's Chi^2: 5.333 McNemar's p-value: 0.021","title":"Example 1 - Cochran's Q test"},{"location":"user_guide/evaluate/cochrans_q/#api","text":"cochrans_q(y_target, y_model_predictions)* Cochran's Q test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy array. 
Returns q, p : float or None, float Returns the Q (chi-squared) value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/cochrans_q/","title":"API"},{"location":"user_guide/evaluate/combined_ftest_5x2cv/","text":"5x2cv combined F test 5x2cv combined F test procedure to compare the performance of two models from mlxtend.evaluate import combined_ftest_5x2cv Overview The 5x2cv combined F test is a procedure for comparing the performance of two models (classifiers or regressors) that was proposed by Alpaydin [1] as a more robust alternative to Dietterich's 5x2cv paired t-test procedure [2] (see paired_ttest_5x2cv ). Dietterich's 5x2cv method was in turn designed to address shortcomings in other methods such as the resampled paired t test (see paired_ttest_resampled ) and the k-fold cross-validated paired t test (see paired_ttest_kfold_cv ). To explain how this method works, let's consider two estimators (e.g., classifiers) A and B. Further, we have a labeled dataset D . In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the 5x2cv paired t test, we repeat the splitting (50% training and 50% test data) 5 times. In each of the 5 iterations, we fit A and B to the training split and evaluate their performance ( p_A and p_B ) on the test split. Then, we rotate the training and test sets (the training set becomes the test set and vice versa) and compute the performance again, which results in 2 performance difference measures: p^{(1)} = p^{(1)}_A - p^{(1)}_B and p^{(2)} = p^{(2)}_A - p^{(2)}_B. Then, we estimate the mean and variance of the differences: \\overline{p} = \\frac{p^{(1)} + p^{(2)}}{2} and s^2 = (p^{(1)} - \\overline{p})^2 + (p^{(2)} - \\overline{p})^2. The F-statistic proposed by Alpaydin (see paper for justifications) is then computed as \\mathcal{f} = \\frac{\\sum_{i=1}^{5} \\sum_{j=1}^2 (p_i^{j})^2}{2 \\sum_{i=1}^5 s_i^2}, which is approximately F distributed with 10 and 5 degrees of freedom. Using the f statistic, the p value can be computed and compared with a previously chosen significance level, e.g., \\alpha=0.05 . If the p value is smaller than \\alpha , we reject the null hypothesis and accept that there is a significant difference in the two models. (A short numeric sketch of this computation is shown after the references below.) References [1] Alpaydin, E. (1999). Combined 5\u00d72 cv F test for comparing supervised classification learning algorithms. Neural computation, 11(8), 1885-1892. [2] Dietterich TG (1998) Approximate Statistical Tests for Comparing Supervised Classification Learning Algorithms. Neural Comput 10:1895\u20131923.
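Here is the promised sketch of the F statistic, assuming a hypothetical 5x2 array p of score differences (one row per repetition, one column per fold); this is illustrative and not the mlxtend implementation:

import numpy as np

# Hypothetical score differences p_i^(j) from 5 repetitions x 2 folds
p = np.array([[ 0.02, -0.01],
              [ 0.01,  0.03],
              [ 0.00, -0.02],
              [ 0.02,  0.01],
              [-0.01,  0.02]])

p_bar = p.mean(axis=1, keepdims=True)      # mean difference per repetition
s2 = ((p - p_bar) ** 2).sum(axis=1)        # variance estimate s_i^2 per repetition
f_stat = (p ** 2).sum() / (2 * s2.sum())   # approximately F(10, 5) under H0
print(f_stat)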
Example 1 - 5x2cv combined F test Assume we want to compare two classification algorithms, logistic regression and a decision tree algorithm: from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from mlxtend.data import iris_data from sklearn.model_selection import train_test_split X, y = iris_data() clf1 = LogisticRegression(random_state=1, solver='liblinear', multi_class='ovr') clf2 = DecisionTreeClassifier(random_state=1) X_train, X_test, y_train, y_test = \\ train_test_split(X, y, test_size=0.25, random_state=123) score1 = clf1.fit(X_train, y_train).score(X_test, y_test) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Logistic regression accuracy: %.2f%%' % (score1*100)) print('Decision tree accuracy: %.2f%%' % (score2*100)) Logistic regression accuracy: 97.37% Decision tree accuracy: 94.74% Note that these accuracy values are not used in the F test procedure, as new test/train splits are generated during the resampling; the values above merely serve to build intuition. Now, let's assume a significance threshold of \\alpha=0.05 for rejecting the null hypothesis that both algorithms perform equally well on the dataset and conduct the 5x2cv F test: from mlxtend.evaluate import combined_ftest_5x2cv f, p = combined_ftest_5x2cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('F statistic: %.3f' % f) print('p value: %.3f' % p) F statistic: 1.053 p value: 0.509 Since p > \\alpha , we cannot reject the null hypothesis and may conclude that the performance of the two algorithms is not significantly different. While it is generally not recommended to apply statistical tests multiple times without correction for multiple hypothesis testing, let us take a look at an example where the decision tree algorithm is limited to producing a very simple decision boundary that would result in a relatively bad performance: clf2 = DecisionTreeClassifier(random_state=1, max_depth=1) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Decision tree accuracy: %.2f%%' % (score2*100)) f, p = combined_ftest_5x2cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('F statistic: %.3f' % f) print('p value: %.3f' % p) Decision tree accuracy: 63.16% F statistic: 34.934 p value: 0.001 Assuming that we also conducted this test with a significance level of \\alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p < 0.001 ) is smaller than \\alpha . API combined_ftest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv combined F test proposed by Alpaydin (1999) to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors.
If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns f : float The F-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/combined_ftest_5x2cv/","title":"5x2cv combined *F* test"},{"location":"user_guide/evaluate/combined_ftest_5x2cv/#5x2cv-combined-f-test","text":"5x2cv combined F test procedure to compare the performance of two models from mlxtend.evaluate import combined_ftest_5x2cv","title":"5x2cv combined F test"},{"location":"user_guide/evaluate/combined_ftest_5x2cv/#overview","text":"The 5x2cv combined F test is a procedure for comparing the performance of two models (classifiers or regressors) that was proposed by Alpaydin [1] as a more robust alternative to Dietterich's 5x2cv paired t-test procedure [2] (see paired_ttest_5x2cv ). Dietterich's 5x2cv method was in turn designed to address shortcomings in other methods such as the resampled paired t test (see paired_ttest_resampled ) and the k-fold cross-validated paired t test (see paired_ttest_kfold_cv ). To explain how this method works, let's consider two estimators (e.g., classifiers) A and B. Further, we have a labeled dataset D . In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the 5x2cv paired t test, we repeat the splitting (50% training and 50% test data) 5 times. In each of the 5 iterations, we fit A and B to the training split and evaluate their performance ( p_A and p_B ) on the test split. Then, we rotate the training and test sets (the training set becomes the test set and vice versa) and compute the performance again, which results in 2 performance difference measures: p^{(1)} = p^{(1)}_A - p^{(1)}_B and p^{(2)} = p^{(2)}_A - p^{(2)}_B. Then, we estimate the mean and variance of the differences: \\overline{p} = \\frac{p^{(1)} + p^{(2)}}{2} and s^2 = (p^{(1)} - \\overline{p})^2 + (p^{(2)} - \\overline{p})^2. The F-statistic proposed by Alpaydin (see the paper for justifications) is then computed as \\mathcal{f} = \\frac{\\sum_{i=1}^{5} \\sum_{j=1}^2 (p_i^{(j)})^2}{2 \\sum_{i=1}^5 s_i^2}, which is approximately F distributed with 10 and 5 degrees of freedom. Using the F statistic, the p value can be computed and compared with a previously chosen significance level, e.g., \\alpha=0.05 . If the p value is smaller than \\alpha , we reject the null hypothesis and accept that there is a significant difference in the two models.","title":"Overview"},{"location":"user_guide/evaluate/combined_ftest_5x2cv/#references","text":"[1] Alpaydin, E. (1999). Combined 5\u00d72 cv F test for comparing supervised classification learning algorithms. Neural computation, 11(8), 1885-1892. [2] Dietterich TG (1998) Approximate Statistical Tests for Comparing Supervised Classification Learning Algorithms.
Neural Comput 10:1895\u20131923.","title":"References"},{"location":"user_guide/evaluate/combined_ftest_5x2cv/#example-1-5x2cv-combined-f-test","text":"Assume we want to compare two classification algorithms, logistic regression and a decision tree algorithm: from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from mlxtend.data import iris_data from sklearn.model_selection import train_test_split X, y = iris_data() clf1 = LogisticRegression(random_state=1, solver='liblinear', multi_class='ovr') clf2 = DecisionTreeClassifier(random_state=1) X_train, X_test, y_train, y_test = \\ train_test_split(X, y, test_size=0.25, random_state=123) score1 = clf1.fit(X_train, y_train).score(X_test, y_test) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Logistic regression accuracy: %.2f%%' % (score1*100)) print('Decision tree accuracy: %.2f%%' % (score2*100)) Logistic regression accuracy: 97.37% Decision tree accuracy: 94.74% Note that these accuracy values are not used in the F test procedure, as new test/train splits are generated during the resampling; the values above merely serve to build intuition. Now, let's assume a significance threshold of \\alpha=0.05 for rejecting the null hypothesis that both algorithms perform equally well on the dataset and conduct the 5x2cv F test: from mlxtend.evaluate import combined_ftest_5x2cv f, p = combined_ftest_5x2cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('F statistic: %.3f' % f) print('p value: %.3f' % p) F statistic: 1.053 p value: 0.509 Since p > \\alpha , we cannot reject the null hypothesis and may conclude that the performance of the two algorithms is not significantly different. While it is generally not recommended to apply statistical tests multiple times without correction for multiple hypothesis testing, let us take a look at an example where the decision tree algorithm is limited to producing a very simple decision boundary that would result in a relatively bad performance: clf2 = DecisionTreeClassifier(random_state=1, max_depth=1) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Decision tree accuracy: %.2f%%' % (score2*100)) f, p = combined_ftest_5x2cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('F statistic: %.3f' % f) print('p value: %.3f' % p) Decision tree accuracy: 63.16% F statistic: 34.934 p value: 0.001 Assuming that we also conducted this test with a significance level of \\alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p < 0.001 ) is smaller than \\alpha .","title":"Example 1 - 5x2cv combined F test"},{"location":"user_guide/evaluate/combined_ftest_5x2cv/#api","text":"combined_ftest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv combined F test proposed by Alpaydin (1999) to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors.
If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns f : float The F-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/combined_ftest_5x2cv/","title":"API"},{"location":"user_guide/evaluate/confusion_matrix/","text":"Confusion Matrix Functions for generating confusion matrices. from mlxtend.evaluate import confusion_matrix from mlxtend.plotting import plot_confusion_matrix Overview Confusion Matrix The confusion matrix (or error matrix ) is one way to summarize the performance of a classifier for binary classification tasks. This square matrix consists of columns and rows that list the number of instances as absolute or relative \"actual class\" vs. \"predicted class\" ratios. Let P be the label of class 1 and N be the label of a second class or the label of all classes that are not class 1 in a multi-class setting. References - Example 1 - Binary classification from mlxtend.evaluate import confusion_matrix y_target = [0, 0, 1, 0, 0, 1, 1, 1] y_predicted = [1, 0, 1, 0, 0, 0, 0, 1] cm = confusion_matrix(y_target=y_target, y_predicted=y_predicted) cm array([[3, 1], [2, 2]]) To visualize the confusion matrix using matplotlib, see the utility function mlxtend.plotting.plot_confusion_matrix : import matplotlib.pyplot as plt from mlxtend.plotting import plot_confusion_matrix fig, ax = plot_confusion_matrix(conf_mat=cm) plt.show() Example 2 - Multi-class classification from mlxtend.evaluate import confusion_matrix y_target = [1, 1, 1, 0, 0, 2, 0, 3] y_predicted = [1, 0, 1, 0, 0, 2, 1, 3] cm = confusion_matrix(y_target=y_target, y_predicted=y_predicted, binary=False) cm array([[2, 1, 0, 0], [1, 2, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) To visualize the confusion matrix using matplotlib, see the utility function mlxtend.plotting.plot_confusion_matrix : import matplotlib.pyplot as plt from mlxtend.plotting import plot_confusion_matrix fig, ax = plot_confusion_matrix(conf_mat=cm) plt.show() Example 3 - Multi-class to binary By setting binary=True , all class labels that are not the positive class label are summarized as class 0. The positive class label becomes class 1. import matplotlib.pyplot as plt from mlxtend.evaluate import confusion_matrix y_target = [1, 1, 1, 0, 0, 2, 0, 3] y_predicted = [1, 0, 1, 0, 0, 2, 1, 3] cm = confusion_matrix(y_target=y_target, y_predicted=y_predicted, binary=True, positive_label=1) cm array([[4, 1], [1, 2]]) To visualize the confusion matrix using matplotlib, see the utility function mlxtend.plotting.plot_confusion_matrix : from mlxtend.plotting import plot_confusion_matrix fig, ax = plot_confusion_matrix(conf_mat=cm) plt.show() API confusion_matrix(y_target, y_predicted, binary=False, positive_label=1) Compute a confusion matrix/contingency table.
Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: False) Maps a multi-class problem onto a binary confusion matrix, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns mat : array-like, shape=[n_classes, n_classes] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/confusion_matrix/","title":"Confusion Matrix"},{"location":"user_guide/evaluate/confusion_matrix/#confusion-matrix","text":"Functions for generating confusion matrices. from mlxtend.evaluate import confusion_matrix from mlxtend.plotting import plot_confusion_matrix","title":"Confusion Matrix"},{"location":"user_guide/evaluate/confusion_matrix/#overview","text":"","title":"Overview"},{"location":"user_guide/evaluate/confusion_matrix/#confusion-matrix_1","text":"The confusion matrix (or error matrix ) is one way to summarize the performance of a classifier for binary classification tasks. This square matrix consists of columns and rows that list the number of instances as absolute or relative \"actual class\" vs. \"predicted class\" ratios. Let P be the label of class 1 and N be the label of a second class or the label of all classes that are not class 1 in a multi-class setting.","title":"Confusion Matrix"},{"location":"user_guide/evaluate/confusion_matrix/#references","text":"-","title":"References"},{"location":"user_guide/evaluate/confusion_matrix/#example-1-binary-classification","text":"from mlxtend.evaluate import confusion_matrix y_target = [0, 0, 1, 0, 0, 1, 1, 1] y_predicted = [1, 0, 1, 0, 0, 0, 0, 1] cm = confusion_matrix(y_target=y_target, y_predicted=y_predicted) cm array([[3, 1], [2, 2]]) To visualize the confusion matrix using matplotlib, see the utility function mlxtend.plotting.plot_confusion_matrix : import matplotlib.pyplot as plt from mlxtend.plotting import plot_confusion_matrix fig, ax = plot_confusion_matrix(conf_mat=cm) plt.show()","title":"Example 1 - Binary classification"},{"location":"user_guide/evaluate/confusion_matrix/#example-2-multi-class-classification","text":"from mlxtend.evaluate import confusion_matrix y_target = [1, 1, 1, 0, 0, 2, 0, 3] y_predicted = [1, 0, 1, 0, 0, 2, 1, 3] cm = confusion_matrix(y_target=y_target, y_predicted=y_predicted, binary=False) cm array([[2, 1, 0, 0], [1, 2, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) To visualize the confusion matrix using matplotlib, see the utility function mlxtend.plotting.plot_confusion_matrix : import matplotlib.pyplot as plt from mlxtend.plotting import plot_confusion_matrix fig, ax = plot_confusion_matrix(conf_mat=cm) plt.show()","title":"Example 2 - Multi-class classification"},{"location":"user_guide/evaluate/confusion_matrix/#example-3-multi-class-to-binary","text":"By setting binary=True , all class labels that are not the positive class label are summarized as class 0. The positive class label becomes class 1.
import matplotlib.pyplot as plt from mlxtend.evaluate import confusion_matrix y_target = [1, 1, 1, 0, 0, 2, 0, 3] y_predicted = [1, 0, 1, 0, 0, 2, 1, 3] cm = confusion_matrix(y_target=y_target, y_predicted=y_predicted, binary=True, positive_label=1) cm array([[4, 1], [1, 2]]) To visualize the confusion matrix using matplotlib, see the utility function mlxtend.plotting.plot_confusion_matrix : from mlxtend.plotting import plot_confusion_matrix fig, ax = plot_confusion_matrix(conf_mat=cm) plt.show()","title":"Example 3 - Multi-class to binary"},{"location":"user_guide/evaluate/confusion_matrix/#api","text":"confusion_matrix(y_target, y_predicted, binary=False, positive_label=1) Compute a confusion matrix/contingency table. Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: False) Maps a multi-class problem onto a binary confusion matrix, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns mat : array-like, shape=[n_classes, n_classes] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/confusion_matrix/","title":"API"},{"location":"user_guide/evaluate/feature_importance_permutation/","text":"Feature Importance Permutation A function to estimate the feature importance of classifiers and regressors based on permutation importance . from mlxtend.evaluate import feature_importance_permutation Overview The permutation importance is an intuitive, model-agnostic method to estimate the feature importance for classifier and regression models. The approach is relatively simple and straightforward: Take a model that was fit to the training dataset Estimate the predictive performance of the model on an independent dataset (e.g., validation dataset) and record it as the baseline performance For each feature i : randomly permute feature column i in the original dataset record the predictive performance of the model on the dataset with the permuted column compute the feature importance as the difference between the baseline performance (step 2) and the performance on the permuted dataset Permutation importance is generally considered a relatively efficient technique that works well in practice [1], while a drawback is that the importance of correlated features may be overestimated [2]. References [1] Terence Parr, Kerem Turgutlu, Christopher Csiszar, and Jeremy Howard. Beware Default Random Forest Importances (http://parrt.cs.usfca.edu/doc/rf-importance/index.html) [2] Strobl, C., Boulesteix, A. L., Kneib, T., Augustin, T., & Zeileis, A. (2008). Conditional variable importance for random forests. BMC bioinformatics, 9(1), 307. Example 1 -- Feature Importance for Classifiers The following example illustrates the feature importance estimation via permutation importance for classification models.
import numpy as np import matplotlib.pyplot as plt from sklearn.svm import SVC from sklearn.model_selection import train_test_split from mlxtend.evaluate import feature_importance_permutation Generate a toy dataset from sklearn.datasets import make_classification from sklearn.ensemble import RandomForestClassifier # Build a classification task using 3 informative features X, y = make_classification(n_samples=10000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, n_classes=2, random_state=0, shuffle=False) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=1, stratify=y) Feature importance via random forest First, we compute the feature importance directly from the random forest via mean impurity decrease (described after the code section): forest = RandomForestClassifier(n_estimators=250, random_state=0) forest.fit(X_train, y_train) print('Training accuracy:', np.mean(forest.predict(X_train) == y_train)*100) print('Test accuracy:', np.mean(forest.predict(X_test) == y_test)*100) importance_vals = forest.feature_importances_ print(importance_vals) Training accuracy: 100.0 Test accuracy: 95.0666666667 [ 0.283357 0.30846795 0.24204291 0.02229767 0.02364941 0.02390578 0.02501543 0.0234225 0.02370816 0.0241332 ] There are several strategies for computing the feature importance in random forests. The method implemented in scikit-learn (used in the next code example) is based on Breiman and Friedman's CART (Breiman, Friedman, \"Classification and regression trees\", 1984), the so-called mean impurity decrease . Here, the importance value of a feature is computed by averaging the impurity decrease for that feature, when splitting a parent node into two child nodes, across all the trees in the ensemble. Note that the impurity decrease values are weighted by the number of samples that are in the respective nodes. This process is repeated for all features in the dataset, and the feature importance values are then normalized so that they sum up to 1. In CART, the authors also note that this fast way of computing feature importance values is relatively consistent with the permutation importance. Next, let's visualize the feature importance values from the random forest including a measure of the mean impurity decrease variability (here: standard deviation): std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0) indices = np.argsort(importance_vals)[::-1] # Plot the feature importances of the forest plt.figure() plt.title(\"Random Forest feature importance\") plt.bar(range(X.shape[1]), importance_vals[indices], yerr=std[indices], align=\"center\") plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.ylim([0, 0.5]) plt.show() As we can see, the features 1, 0, and 2 are estimated to be the most informative ones for the random forest classifier. Next, let's compute the feature importance via the permutation importance approach. Permutation Importance imp_vals, _ = feature_importance_permutation( predict_method=forest.predict, X=X_test, y=y_test, metric='accuracy', num_rounds=1, seed=1) imp_vals array([ 0.26833333, 0.26733333, 0.261 , -0.002 , -0.00033333, 0.00066667, 0.00233333, 0.00066667, 0.00066667, -0.00233333]) Note that the feature_importance_permutation returns two arrays. The first array (here: imp_vals ) contains the actual importance values we are interested in.
If num_rounds > 1 , the permutation is repeated multiple times (with different random seeds), and in this case the first array contains the average value of the importance computed from the different runs. The second array (here, assigned to _ , because we are not using it) then contains all individual values from these runs (more about that later). Now, let's also visualize the importance values in a barplot: indices = np.argsort(imp_vals)[::-1] plt.figure() plt.title(\"Random Forest feature importance via permutation importance\") plt.bar(range(X.shape[1]), imp_vals[indices]) plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.ylim([0, 0.5]) plt.show() As we can see, also here, features 1, 0, and 2 are predicted to be the most important ones, which is consistent with the feature importance values that we computed via the mean impurity decrease method earlier. (Note that in the context of random forests, the feature importance via permutation importance is typically computed using the out-of-bag samples of a random forest, whereas in this implementation, an independent dataset is used.) Previously, it was mentioned that the permutation is repeated multiple times if num_rounds > 1 . In this case, the second array returned by the feature_importance_permutation contains the importance values for these individual runs (the array has shape [num_features, num_rounds]), which we can use to compute a measure of the variability across these runs. imp_vals, imp_all = feature_importance_permutation( predict_method=forest.predict, X=X_test, y=y_test, metric='accuracy', num_rounds=10, seed=1) std = np.std(imp_all, axis=1) indices = np.argsort(imp_vals)[::-1] plt.figure() plt.title(\"Random Forest feature importance via permutation importance w. std. dev.\") plt.bar(range(X.shape[1]), imp_vals[indices], yerr=std[indices]) plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.show() Note that the feature importance values do not sum up to one, since they are not normalized (you can normalize them if you'd like, by dividing these by the sum of importance values). Here, the main point is to look at the importance values relative to each other and not to over-interpret the absolute values. Support Vector Machines While the permutation importance approach yields results that are generally consistent with the mean impurity decrease feature importance values from a random forest, it's a method that is model-agnostic and can be used with any kind of classifier or regressor.
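To make the three-step procedure from the overview concrete, here is a minimal from-scratch sketch of permutation importance (a simplified illustration, not the mlxtend implementation; it assumes an accuracy metric, a single permutation round, and a fitted model exposing a predict method; the function name is made up):

import numpy as np

def permutation_importance_sketch(predict, X, y, seed=None):
    rng = np.random.RandomState(seed)
    baseline = np.mean(predict(X) == y)            # baseline accuracy (step 2)
    importances = np.zeros(X.shape[1])
    for i in range(X.shape[1]):                    # permute one column at a time (step 3)
        X_perm = X.copy()
        X_perm[:, i] = rng.permutation(X_perm[:, i])
        permuted_acc = np.mean(predict(X_perm) == y)
        importances[i] = baseline - permuted_acc   # performance drop = importance
    return importances

For instance, permutation_importance_sketch(forest.predict, X_test, y_test, seed=1) should yield importance values analogous to the imp_vals computed above (up to differences in the random permutations).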
The example below applies the feature_importance_permutation function to a support vector machine: from sklearn.svm import SVC svm = SVC(C=1.0, kernel='rbf') svm.fit(X_train, y_train) print('Training accuracy', np.mean(svm.predict(X_train) == y_train)*100) print('Test accuracy', np.mean(svm.predict(X_test) == y_test)*100) Training accuracy 95.0857142857 Test accuracy 94.9666666667 imp_vals, imp_all = feature_importance_permutation( predict_method=svm.predict, X=X_test, y=y_test, metric='accuracy', num_rounds=10, seed=1) std = np.std(imp_all, axis=1) indices = np.argsort(imp_vals)[::-1] plt.figure() plt.title(\"SVM feature importance via permutation importance\") plt.bar(range(X.shape[1]), imp_vals[indices], yerr=std[indices]) plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.show() Example 2 -- Feature Importance for Regressors import numpy as np import matplotlib.pyplot as plt from mlxtend.evaluate import feature_importance_permutation from sklearn.model_selection import train_test_split from sklearn.datasets import make_regression from sklearn.svm import SVR X, y = make_regression(n_samples=1000, n_features=5, n_informative=2, n_targets=1, random_state=123, shuffle=False) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=123) svm = SVR(kernel='rbf') svm.fit(X_train, y_train) imp_vals, _ = feature_importance_permutation( predict_method=svm.predict, X=X_test, y=y_test, metric='r2', num_rounds=1, seed=1) imp_vals array([ 0.43676245, 0.22231268, 0.00146906, 0.01611528, -0.00522067]) plt.figure() plt.bar(range(X.shape[1]), imp_vals) plt.xticks(range(X.shape[1])) plt.xlim([-1, X.shape[1]]) plt.ylim([0, 0.5]) plt.show() API feature_importance_permutation(X, y, predict_method, metric, num_rounds=1, seed=None) Feature importance estimation via permutation importance Parameters X : NumPy array, shape = [n_samples, n_features] Dataset, where n_samples is the number of samples and n_features is the number of features. y : NumPy array, shape = [n_samples] Target values. predict_method : prediction function A callable function that predicts the target values from X. metric : str, callable The metric for evaluating the feature importance through permutation. By default, the string 'accuracy' is recommended for classifiers and the string 'r2' is recommended for regressors. Optionally, a custom scoring function (e.g., metric=scoring_func ) that accepts two arguments, y_true and y_pred, which have similar shape to the y array. num_rounds : int (default=1) Number of rounds the feature columns are permuted to compute the permutation importance. seed : int or None (default=None) Random seed for permuting the feature columns. Returns mean_importance_vals, all_importance_vals : NumPy arrays. The first array, mean_importance_vals has shape [n_features, ] and contains the importance values for all features. The shape of the second array is [n_features, num_rounds] and contains the feature importance for each repetition. If num_rounds=1, it contains the same values as the first array, mean_importance_vals. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/feature_importance_permutation/","title":"Feature Importance Permutation"},{"location":"user_guide/evaluate/feature_importance_permutation/#feature-importance-permutation","text":"A function to estimate the feature importance of classifiers and regressors based on permutation importance .
from mlxtend.evaluate import feature_importance_permutation","title":"Feature Importance Permutation"},{"location":"user_guide/evaluate/feature_importance_permutation/#overview","text":"The permutation importance is an intuitive, model-agnostic method to estimate the feature importance for classifier and regression models. The approach is relatively simple and straightforward: Take a model that was fit to the training dataset Estimate the predictive performance of the model on an independent dataset (e.g., validation dataset) and record it as the baseline performance For each feature i : randomly permute feature column i in the original dataset record the predictive performance of the model on the dataset with the permuted column compute the feature importance as the difference between the baseline performance (step 2) and the performance on the permuted dataset Permutation importance is generally considered a relatively efficient technique that works well in practice [1], while a drawback is that the importance of correlated features may be overestimated [2].","title":"Overview"},{"location":"user_guide/evaluate/feature_importance_permutation/#references","text":"[1] Terence Parr, Kerem Turgutlu, Christopher Csiszar, and Jeremy Howard. Beware Default Random Forest Importances (http://parrt.cs.usfca.edu/doc/rf-importance/index.html) [2] Strobl, C., Boulesteix, A. L., Kneib, T., Augustin, T., & Zeileis, A. (2008). Conditional variable importance for random forests. BMC bioinformatics, 9(1), 307.","title":"References"},{"location":"user_guide/evaluate/feature_importance_permutation/#example-1-feature-importance-for-classifiers","text":"The following example illustrates the feature importance estimation via permutation importance for classification models. import numpy as np import matplotlib.pyplot as plt from sklearn.svm import SVC from sklearn.model_selection import train_test_split from mlxtend.evaluate import feature_importance_permutation","title":"Example 1 -- Feature Importance for Classifiers"},{"location":"user_guide/evaluate/feature_importance_permutation/#generate-a-toy-dataset","text":"from sklearn.datasets import make_classification from sklearn.ensemble import RandomForestClassifier # Build a classification task using 3 informative features X, y = make_classification(n_samples=10000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, n_classes=2, random_state=0, shuffle=False) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=1, stratify=y)","title":"Generate a toy dataset"},{"location":"user_guide/evaluate/feature_importance_permutation/#feature-importance-via-random-forest","text":"First, we compute the feature importance directly from the random forest via mean impurity decrease (described after the code section): forest = RandomForestClassifier(n_estimators=250, random_state=0) forest.fit(X_train, y_train) print('Training accuracy:', np.mean(forest.predict(X_train) == y_train)*100) print('Test accuracy:', np.mean(forest.predict(X_test) == y_test)*100) importance_vals = forest.feature_importances_ print(importance_vals) Training accuracy: 100.0 Test accuracy: 95.0666666667 [ 0.283357 0.30846795 0.24204291 0.02229767 0.02364941 0.02390578 0.02501543 0.0234225 0.02370816 0.0241332 ] There are several strategies for computing the feature importance in random forests.
The method implemented in scikit-learn (used in the next code example) is based on Breiman and Friedman's CART (Breiman, Friedman, \"Classification and regression trees\", 1984), the so-called mean impurity decrease . Here, the importance value of a feature is computed by averaging the impurity decrease for that feature, when splitting a parent node into two child nodes, across all the trees in the ensemble. Note that the impurity decrease values are weighted by the number of samples that are in the respective nodes. This process is repeated for all features in the dataset, and the feature importance values are then normalized so that they sum up to 1. In CART, the authors also note that this fast way of computing feature importance values is relatively consistent with the permutation importance. Next, let's visualize the feature importance values from the random forest including a measure of the mean impurity decrease variability (here: standard deviation): std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0) indices = np.argsort(importance_vals)[::-1] # Plot the feature importances of the forest plt.figure() plt.title(\"Random Forest feature importance\") plt.bar(range(X.shape[1]), importance_vals[indices], yerr=std[indices], align=\"center\") plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.ylim([0, 0.5]) plt.show() As we can see, the features 1, 0, and 2 are estimated to be the most informative ones for the random forest classifier. Next, let's compute the feature importance via the permutation importance approach.","title":"Feature importance via random forest"},{"location":"user_guide/evaluate/feature_importance_permutation/#permutation-importance","text":"imp_vals, _ = feature_importance_permutation( predict_method=forest.predict, X=X_test, y=y_test, metric='accuracy', num_rounds=1, seed=1) imp_vals array([ 0.26833333, 0.26733333, 0.261 , -0.002 , -0.00033333, 0.00066667, 0.00233333, 0.00066667, 0.00066667, -0.00233333]) Note that the feature_importance_permutation returns two arrays. The first array (here: imp_vals ) contains the actual importance values we are interested in. If num_rounds > 1 , the permutation is repeated multiple times (with different random seeds), and in this case the first array contains the average value of the importance computed from the different runs. The second array (here, assigned to _ , because we are not using it) then contains all individual values from these runs (more about that later). Now, let's also visualize the importance values in a barplot: indices = np.argsort(imp_vals)[::-1] plt.figure() plt.title(\"Random Forest feature importance via permutation importance\") plt.bar(range(X.shape[1]), imp_vals[indices]) plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.ylim([0, 0.5]) plt.show() As we can see, also here, features 1, 0, and 2 are predicted to be the most important ones, which is consistent with the feature importance values that we computed via the mean impurity decrease method earlier. (Note that in the context of random forests, the feature importance via permutation importance is typically computed using the out-of-bag samples of a random forest, whereas in this implementation, an independent dataset is used.) Previously, it was mentioned that the permutation is repeated multiple times if num_rounds > 1 .
In this case, the second array returned by the feature_importance_permutation contains the importance values for these individual runs (the array has shape [num_features, num_rounds]), which we can use to compute a measure of the variability across these runs. imp_vals, imp_all = feature_importance_permutation( predict_method=forest.predict, X=X_test, y=y_test, metric='accuracy', num_rounds=10, seed=1) std = np.std(imp_all, axis=1) indices = np.argsort(imp_vals)[::-1] plt.figure() plt.title(\"Random Forest feature importance via permutation importance w. std. dev.\") plt.bar(range(X.shape[1]), imp_vals[indices], yerr=std[indices]) plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.show() Note that the feature importance values do not sum up to one, since they are not normalized (you can normalize them if you'd like, by dividing these by the sum of importance values). Here, the main point is to look at the importance values relative to each other and not to over-interpret the absolute values.","title":"Permutation Importance"},{"location":"user_guide/evaluate/feature_importance_permutation/#support-vector-machines","text":"While the permutation importance approach yields results that are generally consistent with the mean impurity decrease feature importance values from a random forest, it's a method that is model-agnostic and can be used with any kind of classifier or regressor. The example below applies the feature_importance_permutation function to a support vector machine: from sklearn.svm import SVC svm = SVC(C=1.0, kernel='rbf') svm.fit(X_train, y_train) print('Training accuracy', np.mean(svm.predict(X_train) == y_train)*100) print('Test accuracy', np.mean(svm.predict(X_test) == y_test)*100) Training accuracy 95.0857142857 Test accuracy 94.9666666667 imp_vals, imp_all = feature_importance_permutation( predict_method=svm.predict, X=X_test, y=y_test, metric='accuracy', num_rounds=10, seed=1) std = np.std(imp_all, axis=1) indices = np.argsort(imp_vals)[::-1] plt.figure() plt.title(\"SVM feature importance via permutation importance\") plt.bar(range(X.shape[1]), imp_vals[indices], yerr=std[indices]) plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.show()","title":"Support Vector Machines"},{"location":"user_guide/evaluate/feature_importance_permutation/#example-1-feature-importance-for-regressors","text":"import numpy as np import matplotlib.pyplot as plt from mlxtend.evaluate import feature_importance_permutation from sklearn.model_selection import train_test_split from sklearn.datasets import make_regression from sklearn.svm import SVR X, y = make_regression(n_samples=1000, n_features=5, n_informative=2, n_targets=1, random_state=123, shuffle=False) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=123) svm = SVR(kernel='rbf') svm.fit(X_train, y_train) imp_vals, _ = feature_importance_permutation( predict_method=svm.predict, X=X_test, y=y_test, metric='r2', num_rounds=1, seed=1) imp_vals array([ 0.43676245, 0.22231268, 0.00146906, 0.01611528, -0.00522067]) plt.figure() plt.bar(range(X.shape[1]), imp_vals) plt.xticks(range(X.shape[1])) plt.xlim([-1, X.shape[1]]) plt.ylim([0, 0.5]) plt.show()","title":"Example 2 -- Feature Importance for Regressors"},{"location":"user_guide/evaluate/feature_importance_permutation/#api","text":"feature_importance_permutation(X, y, predict_method, metric, num_rounds=1, seed=None) Feature importance estimation via permutation importance Parameters X : NumPy array, shape =
[n_samples, n_features] Dataset, where n_samples is the number of samples and n_features is the number of features. y : NumPy array, shape = [n_samples] Target values. predict_method : prediction function A callable function that predicts the target values from X. metric : str, callable The metric for evaluating the feature importance through permutation. By default, the string 'accuracy' is recommended for classifiers and the string 'r2' is recommended for regressors. Optionally, a custom scoring function (e.g., metric=scoring_func ) that accepts two arguments, y_true and y_pred, which have similar shape to the y array. num_rounds : int (default=1) Number of rounds the feature columns are permuted to compute the permutation importance. seed : int or None (default=None) Random seed for permuting the feature columns. Returns mean_importance_vals, all_importance_vals : NumPy arrays. The first array, mean_importance_vals has shape [n_features, ] and contains the importance values for all features. The shape of the second array is [n_features, num_rounds] and contains the feature importance for each repetition. If num_rounds=1, it contains the same values as the first array, mean_importance_vals. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/feature_importance_permutation/","title":"API"},{"location":"user_guide/evaluate/ftest/","text":"F-Test F-test for comparing the performance of multiple classifiers. from mlxtend.evaluate import ftest Overview In the context of evaluating machine learning models, the F-test by George W. Snedecor [1] can be regarded as analogous to Cochran's Q test that can be applied to evaluate multiple classifiers (i.e., whether their accuracies estimated on a test set differ) as described by Looney [2][3]. More formally, assume the task is to test the null hypothesis that there is no difference between the classification accuracies [1]: H_0: p_1 = p_2 = \\cdots = p_L. Let \\{D_1, \\dots , D_L\\} be a set of classifiers that have all been tested on the same dataset. If the L classifiers don't perform differently, then the F statistic is distributed according to an F distribution with (L-1) and (L-1)\\times N degrees of freedom, where N is the number of examples in the test set. The calculation of the F statistic consists of several components, which are listed below (adapted from [3]). Sum of squares of the classifiers: SSA = N \\sum_{j=1}^{N} (L_j)^2, where L_j is the number of classifiers out of L that correctly classified object \\mathbf{z}_j \\in \\mathbf{Z}_{N} , where \\mathbf{Z}_{N} = \\{\\mathbf{z}_1, ... \\mathbf{z}_{N}\\} is the test dataset on which the classifiers are tested. The sum of squares for the objects: SSB = \\frac{1}{L} \\sum_{j=1}^N (L_j)^2 - L\\cdot N \\cdot ACC_{avg}^2, where ACC_{avg} is the average of the accuracies of the different models, ACC_{avg} = \\frac{1}{L} \\sum_{i=1}^L ACC_i . The total sum of squares: SST = L\\cdot N \\cdot ACC_{avg}^2 (1 - ACC_{avg}^2). The sum of squares for the classification--object interaction: SSAB = SST - SSA - SSB. The mean SSA and mean SSAB values: MSA = \\frac{SSA}{L-1}, and MSAB = \\frac{SSAB}{(L-1) (N-1)}. From the MSA and MSAB, we can then calculate the F-value as F = \\frac{MSA}{MSAB}. After computing the F-value, we can then look up the p-value from an F-distribution table for the corresponding degrees of freedom or obtain it computationally from a cumulative F-distribution function.
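As a small illustration of this last step, the p-value can be obtained from the cumulative F-distribution with scipy (a minimal sketch; the values for L, N, and the F statistic are taken from Example 1 below):

from scipy import stats

L, N = 3, 100      # number of classifiers and test-set size, as in Example 1 below
f_value = 3.873    # F statistic, e.g., as computed by mlxtend's ftest
df1 = L - 1        # (L - 1) degrees of freedom
df2 = (L - 1) * N  # (L - 1) x N degrees of freedom

p_value = stats.f.sf(f_value, df1, df2)  # upper-tail probability
print('p-value: %.3f' % p_value)         # ~0.022, consistent with Example 1 below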
In practice, if we successfully rejected the null hypothesis at a previously chosen significance threshold, we could perform multiple post hoc pair-wise tests -- for example, McNemar tests with a Bonferroni correction -- to determine which pairs have different population proportions. References [1] Snedecor, George W. and Cochran, William G. (1989), Statistical Methods, Eighth Edition, Iowa State University Press. [2] Looney, Stephen W. \"A statistical technique for comparing the accuracies of several classifiers.\" Pattern Recognition Letters 8, no. 1 (1988): 5-9. [3] Kuncheva, Ludmila I. Combining pattern classifiers: methods and algorithms. John Wiley & Sons, 2004. Example 1 - F-test import numpy as np from mlxtend.evaluate import ftest ## Dataset: # ground truth labels of the test dataset: y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # predictions by 3 classifiers (`y_model_1`, `y_model_2`, and `y_model_3`): y_model_1 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_2 = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_3 = np.array([1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]) Assuming a significance level \\alpha=0.05 , we can conduct the F test as follows to test the null hypothesis that there is no difference between the classification accuracies, H_0: p_1 = p_2 = \\cdots = p_L : f, p_value = ftest(y_true, y_model_1, y_model_2, y_model_3) print('F: %.3f' % f) print('p-value: %.3f' % p_value) F: 3.873 p-value: 0.022 Since the p-value is smaller than \\alpha , we can reject the null hypothesis and conclude that there is a difference between the classification accuracies. As mentioned in the introduction earlier, we could now perform multiple post hoc pair-wise tests -- for example, McNemar tests with a Bonferroni correction -- to determine which pairs have different population proportions. API ftest(y_target, *y_model_predictions) F-Test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy arrays. Returns f, p : float or None, float Returns the F-value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/ftest/","title":"F-Test"},{"location":"user_guide/evaluate/ftest/#f-test","text":"F-test for comparing the performance of multiple classifiers.
from mlxtend.evaluate import ftest","title":"F-Test"},{"location":"user_guide/evaluate/ftest/#overview","text":"In the context of evaluating machine learning models, the F-test by George W. Snedecor [1] can be regarded as analogous to Cochran's Q test that can be applied to evaluate multiple classifiers (i.e., whether their accuracies estimated on a test set differ) as described by Looney [2][3]. More formally, assume the task is to test the null hypothesis that there is no difference between the classification accuracies [1]: H_0: p_1 = p_2 = \\cdots = p_L. Let \\{D_1, \\dots , D_L\\} be a set of classifiers that have all been tested on the same dataset. If the L classifiers don't perform differently, then the F statistic is distributed according to an F distribution with (L-1) and (L-1)\\times N degrees of freedom, where N is the number of examples in the test set. The calculation of the F statistic consists of several components, which are listed below (adapted from [3]). Sum of squares of the classifiers: SSA = N \\sum_{j=1}^{N} (L_j)^2, where L_j is the number of classifiers out of L that correctly classified object \\mathbf{z}_j \\in \\mathbf{Z}_{N} , where \\mathbf{Z}_{N} = \\{\\mathbf{z}_1, ... \\mathbf{z}_{N}\\} is the test dataset on which the classifiers are tested. The sum of squares for the objects: SSB = \\frac{1}{L} \\sum_{j=1}^N (L_j)^2 - L\\cdot N \\cdot ACC_{avg}^2, where ACC_{avg} is the average of the accuracies of the different models, ACC_{avg} = \\frac{1}{L} \\sum_{i=1}^L ACC_i . The total sum of squares: SST = L\\cdot N \\cdot ACC_{avg}^2 (1 - ACC_{avg}^2). The sum of squares for the classification--object interaction: SSAB = SST - SSA - SSB. The mean SSA and mean SSAB values: MSA = \\frac{SSA}{L-1}, and MSAB = \\frac{SSAB}{(L-1) (N-1)}. From the MSA and MSAB, we can then calculate the F-value as F = \\frac{MSA}{MSAB}. After computing the F-value, we can then look up the p-value from an F-distribution table for the corresponding degrees of freedom or obtain it computationally from a cumulative F-distribution function. In practice, if we successfully rejected the null hypothesis at a previously chosen significance threshold, we could perform multiple post hoc pair-wise tests -- for example, McNemar tests with a Bonferroni correction -- to determine which pairs have different population proportions.","title":"Overview"},{"location":"user_guide/evaluate/ftest/#references","text":"[1] Snedecor, George W. and Cochran, William G. (1989), Statistical Methods, Eighth Edition, Iowa State University Press. [2] Looney, Stephen W. \"A statistical technique for comparing the accuracies of several classifiers.\" Pattern Recognition Letters 8, no. 1 (1988): 5-9. [3] Kuncheva, Ludmila I. Combining pattern classifiers: methods and algorithms.
John Wiley & Sons, 2004.","title":"References"},{"location":"user_guide/evaluate/ftest/#example-1-f-test","text":"import numpy as np from mlxtend.evaluate import ftest ## Dataset: # ground truth labels of the test dataset: y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # predictions by 3 classifiers (`y_model_1`, `y_model_2`, and `y_model_3`): y_model_1 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_2 = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_3 = np.array([1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]) Assuming a significance level \\alpha=0.05 , we can conduct the F test as follows to test the null hypothesis that there is no difference between the classification accuracies, H_0: p_1 = p_2 = \\cdots = p_L : f, p_value = ftest(y_true, y_model_1, y_model_2, y_model_3) print('F: %.3f' % f) print('p-value: %.3f' % p_value) F: 3.873 p-value: 0.022 Since the p-value is smaller than \\alpha , we can reject the null hypothesis and conclude that there is a difference between the classification accuracies. As mentioned in the introduction earlier, we could now perform multiple post hoc pair-wise tests -- for example, McNemar tests with a Bonferroni correction -- to determine which pairs have different population proportions.","title":"Example 1 - F-test"},{"location":"user_guide/evaluate/ftest/#api","text":"ftest(y_target, *y_model_predictions) F-Test to compare 2 or more models. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. *y_model_predictions : array-likes, shape=[n_samples] Variable number of 2 or more arrays that contain the predicted class labels from models as 1D NumPy arrays. Returns f, p : float or None, float Returns the F-value and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/ftest/","title":"API"},{"location":"user_guide/evaluate/lift_score/","text":"Lift Score Scoring function to compute the LIFT metric, the ratio of correctly predicted positive examples and the actual positive examples in the test dataset. from mlxtend.evaluate import lift_score Overview In the context of classification, lift [1] compares model predictions to randomly generated predictions. Lift is often used in marketing research combined with gain and lift charts as a visual aid [2]. For example, assuming a 10% customer response as a baseline, a lift value of 3 would correspond to a 30% customer response when using the predictive model.
Note that lift has the range \\lbrack 0, \\infty \\rbrack . There are many strategies to compute lift , and below, we will illustrate the computation of the lift score using a classic confusion matrix. For instance, let's assume the following prediction and target labels, where \"1\" is the positive class: \\text{true labels}: [0, 0, 1, 0, 0, 1, 1, 1, 1, 1] \\text{prediction}: [1, 0, 1, 0, 0, 0, 0, 1, 0, 0] Then, our confusion matrix would look as follows: Based on the confusion matrix above, with \"1\" as positive label, we compute lift as follows: \\text{lift} = \\frac{TP/(TP+FP)}{(TP+FN)/(TP+TN+FP+FN)} Plugging in the actual values from the example above, we arrive at the following lift value: \\frac{2/(2+1)}{(2+4)/(2+3+1+4)} = 1.1111111111111112 An alternative way of computing lift is by using the support metric [3]: \\text{lift} = \\frac{\\text{support}(\\text{true labels} \\cap \\text{prediction})}{\\text{support}(\\text{true labels}) \\times \\text{support}(\\text{prediction})}, Support is x / N , where x is the number of occurrences of an observation and N is the total number of samples in the dataset. \\text{true labels} \\cap \\text{prediction} are the true positives, true labels are true positives plus false negatives, and prediction are true positives plus false positives. Plugging the values from our example into the equation above, we arrive at: \\frac{2/10}{(6/10 \\times 3/10)} = 1.1111111111111112 References [1] S. Brin, R. Motwani, J. D. Ullman, and S. Tsur. Dynamic itemset counting and implication rules for market basket data . In Proc. of the ACM SIGMOD Int'l Conf. on Management of Data (ACM SIGMOD '97), pages 265-276, 1997. [2] https://www3.nd.edu/~busiforc/Lift_chart.html [3] https://en.wikipedia.org/wiki/Association_rule_learning#Support Example 1 - Computing Lift This example demonstrates the basic use of the lift_score function using the example from the Overview section. import numpy as np from mlxtend.evaluate import lift_score y_target = np.array([0, 0, 1, 0, 0, 1, 1, 1, 1, 1]) y_predicted = np.array([1, 0, 1, 0, 0, 0, 0, 1, 0, 0]) lift_score(y_target, y_predicted) 1.1111111111111112 Example 2 - Using lift_score in GridSearch The lift_score function can also be used with scikit-learn objects, such as GridSearch : from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.svm import SVC from sklearn.metrics import make_scorer # make custom scorer lift_scorer = make_scorer(lift_score) iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, stratify=y, random_state=123) hyperparameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}] clf = GridSearchCV(SVC(), hyperparameters, cv=10, scoring=lift_scorer) clf.fit(X_train, y_train) print(clf.best_score_) print(clf.best_params_) 3.0 {'gamma': 0.001, 'kernel': 'rbf', 'C': 1000} API lift_score(y_target, y_predicted, binary=True, positive_label=1) Lift measures the degree to which the predictions of a classification model are better than randomly-generated predictions. In terms of True Positives (TP), True Negatives (TN), False Positives (FP), and False Negatives (FN), the lift score is computed as: [ TP/(TP+FN) ] / [ (TP+FP) / (TP+TN+FP+FN) ] Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels.
binary : bool (default: True) Maps a multi-class problem onto a binary, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns score : float Lift score in the range [0, \\infty ] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/lift_score/","title":"Lift Score"},{"location":"user_guide/evaluate/lift_score/#lift-score","text":"Scoring function to compute the LIFT metric, the ratio of correctly predicted positive examples and the actual positive examples in the test dataset. from mlxtend.evaluate import lift_score","title":"Lift Score"},{"location":"user_guide/evaluate/lift_score/#overview","text":"In the context of classification, lift [1] compares model predictions to randomly generated predictions. Lift is often used in marketing research combined with gain and lift charts as a visual aid [2]. For example, assuming a 10% customer response as a baseline, a lift value of 3 would correspond to a 30% customer response when using the predictive model. Note that lift has the range \\lbrack 0, \\infty \\rbrack . There are many strategies to compute lift , and below, we will illustrate the computation of the lift score using a classic confusion matrix. For instance, let's assume the following prediction and target labels, where \"1\" is the positive class: \\text{true labels}: [0, 0, 1, 0, 0, 1, 1, 1, 1, 1] \\text{prediction}: [1, 0, 1, 0, 0, 0, 0, 1, 0, 0] Then, our confusion matrix would look as follows: Based on the confusion matrix above, with \"1\" as positive label, we compute lift as follows: \\text{lift} = \\frac{TP/(TP+FP)}{(TP+FN)/(TP+TN+FP+FN)} Plugging in the actual values from the example above, we arrive at the following lift value: \\frac{2/(2+1)}{(2+4)/(2+3+1+4)} = 1.1111111111111112 An alternative way of computing lift is by using the support metric [3]: \\text{lift} = \\frac{\\text{support}(\\text{true labels} \\cap \\text{prediction})}{\\text{support}(\\text{true labels}) \\times \\text{support}(\\text{prediction})}, Support is x / N , where x is the number of occurrences of an observation and N is the total number of samples in the dataset. \\text{true labels} \\cap \\text{prediction} are the true positives, true labels are true positives plus false negatives, and prediction are true positives plus false positives. Plugging the values from our example into the equation above, we arrive at: \\frac{2/10}{(6/10 \\times 3/10)} = 1.1111111111111112","title":"Overview"},{"location":"user_guide/evaluate/lift_score/#references","text":"[1] S. Brin, R. Motwani, J. D. Ullman, and S. Tsur. Dynamic itemset counting and implication rules for market basket data . In Proc. of the ACM SIGMOD Int'l Conf. on Management of Data (ACM SIGMOD '97), pages 265-276, 1997. [2] https://www3.nd.edu/~busiforc/Lift_chart.html [3] https://en.wikipedia.org/wiki/Association_rule_learning#Support","title":"References"},{"location":"user_guide/evaluate/lift_score/#example-1-computing-lift","text":"This example demonstrates the basic use of the lift_score function using the example from the Overview section.
import numpy as np from mlxtend.evaluate import lift_score y_target = np.array([0, 0, 1, 0, 0, 1, 1, 1, 1, 1]) y_predicted = np.array([1, 0, 1, 0, 0, 0, 0, 1, 0, 0]) lift_score(y_target, y_predicted) 1.1111111111111112","title":"Example 1 - Computing Lift"},{"location":"user_guide/evaluate/lift_score/#example-2-using-lift_score-in-gridsearch","text":"The lift_score function can also be used with scikit-learn objects, such as GridSearch : from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.svm import SVC from sklearn.metrics import make_scorer # make custom scorer lift_scorer = make_scorer(lift_score) iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, stratify=y, random_state=123) hyperparameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}] clf = GridSearchCV(SVC(), hyperparameters, cv=10, scoring=lift_scorer) clf.fit(X_train, y_train) print(clf.best_score_) print(clf.best_params_) 3.0 {'gamma': 0.001, 'kernel': 'rbf', 'C': 1000}","title":"Example 2 - Using lift_score in GridSearch"},{"location":"user_guide/evaluate/lift_score/#api","text":"lift_score(y_target, y_predicted, binary=True, positive_label=1) Lift measures the degree to which the predictions of a classification model are better than randomly-generated predictions. In terms of True Positives (TP), True Negatives (TN), False Positives (FP), and False Negatives (FN), the lift score is computed as: [ TP/(TP+FN) ] / [ (TP+FP) / (TP+TN+FP+FN) ] Parameters y_target : array-like, shape=[n_samples] True class labels. y_predicted : array-like, shape=[n_samples] Predicted class labels. binary : bool (default: True) Maps a multi-class problem onto a binary, where the positive class is 1 and all other classes are 0. positive_label : int (default: 1) Class label of the positive class. Returns score : float Lift score in the range [0, \\infty ] Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/lift_score/","title":"API"},{"location":"user_guide/evaluate/mcnemar/","text":"McNemar's Test McNemar's test for paired nominal data from mlxtend.evaluate import mcnemar Overview McNemar's Test [1] (sometimes also called \"within-subjects chi-squared test\") is a statistical test for paired nominal data. In the context of machine learning (or statistical) models, we can use McNemar's Test to compare the predictive accuracy of two models. McNemar's test is based on a 2 times 2 contingency table of the two models' predictions. McNemar's Test Statistic In McNemar's Test, we formulate the null hypothesis that the probabilities p(b) and p(c) are the same, or in simplified terms: Neither of the two models performs better than the other. Thus, the alternative hypothesis is that the performances of the two models are not equal. The McNemar test statistic (\"chi-squared\") can be computed as follows: \\chi^2 = \\frac{(b - c)^2}{(b + c)}, If the sum of cell c and b is sufficiently large, the \\chi^2 value follows a chi-squared distribution with one degree of freedom. After setting a significance threshold, e.g., \\alpha=0.05 , we can compute the p-value -- assuming that the null hypothesis is true, the p-value is the probability of observing this empirical (or a larger) chi-squared value.
If the p-value is lower than our chosen significance level, we can reject the null hypothesis that the two models' performances are equal. Continuity Correction Approximately 1 year after Quinn McNemar published the McNemar Test [1], Edwards [2] proposed a continuity-corrected version, which is the more commonly used variant today: \\chi^2 = \\frac{( \\mid b - c \\mid - 1)^2}{(b + c)}. Exact p-values As mentioned earlier, an exact binomial test is recommended for small sample sizes ( b + c < 25 [3]), since the chi-squared value may not be well-approximated by the chi-squared distribution. The exact p-value can be computed as follows: p = 2 \\sum^{n}_{i=b} \\binom{n}{i} 0.5^i (1 - 0.5)^{n-i}, where n = b + c , and the factor 2 is used to compute the two-sided p-value. Example For instance, given that two models have accuracies of 99.7% and 99.6%, a 2x2 contingency table can provide further insights for model selection. In both subfigure A and B, the predictive accuracies of the two models are as follows: model 1 accuracy: 9,960 / 10,000 = 99.6% model 2 accuracy: 9,970 / 10,000 = 99.7% Now, in subfigure A, we can see that model 2 got 11 predictions right that model 1 got wrong. Vice versa, model 1 got 1 prediction right that model 2 got wrong. Thus, based on this 11:1 ratio, we may conclude that model 2 performs substantially better than model 1. However, in subfigure B, the ratio is 25:15, which is less conclusive about which model is the better one to choose. In the following coding examples, we will use these 2 scenarios A and B to illustrate McNemar's test. References [1] McNemar, Quinn, 1947. \" Note on the sampling error of the difference between correlated proportions or percentages \". Psychometrika. 12 (2): 153\u2013157. [2] Edwards AL: Note on the \u201ccorrection for continuity\u201d in testing the significance of the difference between correlated proportions. Psychometrika. 1948, 13 (3): 185-187. 10.1007/BF02289261. [3] https://en.wikipedia.org/wiki/McNemar%27s_test Example 1 - Creating 2x2 Contingency tables The mcnemar function expects a 2x2 contingency table as a NumPy array that is formatted as follows: Such a contingency matrix can be created by using the mcnemar_table function from mlxtend.evaluate . For example: import numpy as np from mlxtend.evaluate import mcnemar_table # The correct target (class) labels y_target = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) # Class labels predicted by model 1 y_model1 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) # Class labels predicted by model 2 y_model2 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) tb = mcnemar_table(y_target=y_target, y_model1=y_model1, y_model2=y_model2) print(tb) [[4 1] [2 3]] Example 2 - McNemar's Test for Scenario B Now, let us continue with the example mentioned in the overview section and assume that we have already computed the 2x2 contingency table: import numpy as np tb_b = np.array([[9945, 25], [15, 15]]) To test the null hypothesis that the predictive performances of the two models are equal (using a significance level of \\alpha=0.05 ), we can conduct a corrected McNemar Test for computing the chi-squared and p-value as follows: from mlxtend.evaluate import mcnemar chi2, p = mcnemar(ary=tb_b, corrected=True) print('chi-squared:', chi2) print('p-value:', p) chi-squared: 2.025 p-value: 0.154728923485 Since the p-value is larger than our assumed significance threshold ( \\alpha=0.05 ), we cannot reject our null hypothesis and assume that there is no significant difference between the two predictive models.
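The exact p-value formula from above can also be evaluated directly; a minimal sketch (not the library implementation) using the scenario A counts b=11 and c=1 from Example 3 below; the result, roughly 0.006, agrees with the reported p-value up to rounding and implementation details:

from scipy.stats import binom
b, c = 11, 1
n = b + c
p = 2 * sum(binom.pmf(i, n, 0.5) for i in range(b, n + 1))  # two-sided exact p-value
print(p)  # roughly 0.006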
Example 3 - McNemar's Test for Scenario A In contrast to scenario B (Example 2), the sample size in scenario A is relatively small (b + c = 11 + 1 = 12), smaller than the recommended 25 [3] for the computed chi-squared value to be well-approximated by the chi-squared distribution. In this case, we need to compute the exact p-value from the binomial distribution: from mlxtend.evaluate import mcnemar import numpy as np tb_a = np.array([[9959, 11], [1, 29]]) chi2, p = mcnemar(ary=tb_a, exact=True) print('chi-squared:', chi2) print('p-value:', p) chi-squared: None p-value: 0.005859375 Assuming that we conducted this test also with a significance level of \\alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p \\approx 0.006 ) is smaller than \\alpha . API mcnemar(ary, corrected=True, exact=False) McNemar test for paired nominal data Parameters ary : array-like, shape=[2, 2] 2 x 2 contingency table (as returned by evaluate.mcnemar_table), where a: ary[0, 0]: # of samples that both models predicted correctly b: ary[0, 1]: # of samples that model 1 got right and model 2 got wrong c: ary[1, 0]: # of samples that model 2 got right and model 1 got wrong d: ary[1, 1]: # of samples that both models predicted incorrectly corrected : bool (default: True) Uses Edwards' continuity correction for chi-squared if True exact : bool, (default: False) If True , uses an exact binomial test comparing b to a binomial distribution with n = b + c and p = 0.5. It is highly recommended to use exact=True for sample sizes < 25 since chi-squared is not well-approximated by the chi-squared distribution! Returns chi2, p : float or None, float Returns the chi-squared value and the p-value; if exact=True (default: False ), chi2 is None Examples For usage examples, please see [http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/](http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/)","title":"McNemar's Test"},{"location":"user_guide/evaluate/mcnemar/#mcnemars-test","text":"McNemar's test for paired nominal data from mlxtend.evaluate import mcnemar","title":"McNemar's Test"},{"location":"user_guide/evaluate/mcnemar/#overview","text":"McNemar's Test [1] (sometimes also called \"within-subjects chi-squared test\") is a statistical test for paired nominal data. In the context of machine learning (or statistical) models, we can use McNemar's Test to compare the predictive accuracy of two models. McNemar's test is based on a 2x2 contingency table of the two models' predictions.","title":"Overview"},{"location":"user_guide/evaluate/mcnemar/#mcnemars-test-statistic","text":"In McNemar's Test, we formulate the null hypothesis that the probabilities p(b) and p(c) are the same, or in simplified terms: neither of the two models performs better than the other. Thus, the alternative hypothesis is that the performances of the two models are not equal. The McNemar test statistic (\"chi-squared\") can be computed as follows: \\chi^2 = \\frac{(b - c)^2}{(b + c)}, If the sum of cells b and c is sufficiently large, the \\chi^2 value follows a chi-squared distribution with one degree of freedom. After setting a significance threshold, e.g., \\alpha=0.05 , we can compute the p-value -- assuming that the null hypothesis is true, the p-value is the probability of observing this empirical (or a larger) chi-squared value.
If the p-value is lower than our chosen significance level, we can reject the null hypothesis that the two models' performances are equal.","title":"McNemar's Test Statistic"},{"location":"user_guide/evaluate/mcnemar/#continuity-correction","text":"Approximately 1 year after Quinn McNemar published the McNemar Test [1], Edwards [2] proposed a continuity-corrected version, which is the more commonly used variant today: \\chi^2 = \\frac{( \\mid b - c \\mid - 1)^2}{(b + c)}.","title":"Continuity Correction"},{"location":"user_guide/evaluate/mcnemar/#exact-p-values","text":"As mentioned earlier, an exact binomial test is recommended for small sample sizes ( b + c < 25 [3]), since the chi-squared value may not be well-approximated by the chi-squared distribution. The exact p-value can be computed as follows: p = 2 \\sum^{n}_{i=b} \\binom{n}{i} 0.5^i (1 - 0.5)^{n-i}, where n = b + c , and the factor 2 is used to compute the two-sided p-value.","title":"Exact p-values"},{"location":"user_guide/evaluate/mcnemar/#example","text":"For instance, given that two models have accuracies of 99.7% and 99.6%, a 2x2 contingency table can provide further insights for model selection. In both subfigure A and B, the predictive accuracies of the two models are as follows: model 1 accuracy: 9,960 / 10,000 = 99.6% model 2 accuracy: 9,970 / 10,000 = 99.7% Now, in subfigure A, we can see that model 2 got 11 predictions right that model 1 got wrong. Vice versa, model 1 got 1 prediction right that model 2 got wrong. Thus, based on this 11:1 ratio, we may conclude that model 2 performs substantially better than model 1. However, in subfigure B, the ratio is 25:15, which is less conclusive about which model is the better one to choose. In the following coding examples, we will use these 2 scenarios A and B to illustrate McNemar's test.","title":"Example"},{"location":"user_guide/evaluate/mcnemar/#references","text":"[1] McNemar, Quinn, 1947. \" Note on the sampling error of the difference between correlated proportions or percentages \". Psychometrika. 12 (2): 153\u2013157. [2] Edwards AL: Note on the \u201ccorrection for continuity\u201d in testing the significance of the difference between correlated proportions. Psychometrika. 1948, 13 (3): 185-187. 10.1007/BF02289261. [3] https://en.wikipedia.org/wiki/McNemar%27s_test","title":"References"},{"location":"user_guide/evaluate/mcnemar/#example-1-creating-2x2-contigency-tables","text":"The mcnemar function expects a 2x2 contingency table as a NumPy array that is formatted as follows: Such a contingency matrix can be created by using the mcnemar_table function from mlxtend.evaluate .
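Under the hood, only the paired-outcome counts matter; a minimal sketch (not the library code) of tallying the two disagreement counts directly from the label arrays used in the example that follows:

import numpy as np
y_target = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
y_model1 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0])
y_model2 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0])
m1_right = y_model1 == y_target
m2_right = y_model2 == y_target
print(np.sum(m1_right & ~m2_right))  # samples that only model 1 got right: 2
print(np.sum(~m1_right & m2_right))  # samples that only model 2 got right: 1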
For example: import numpy as np from mlxtend.evaluate import mcnemar_table # The correct target (class) labels y_target = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) # Class labels predicted by model 1 y_model1 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) # Class labels predicted by model 2 y_model2 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) tb = mcnemar_table(y_target=y_target, y_model1=y_model1, y_model2=y_model2) print(tb) [[4 1] [2 3]]","title":"Example 1 - Creating 2x2 Contingency tables"},{"location":"user_guide/evaluate/mcnemar/#example-2-mcnemars-test-for-scenario-b","text":"Now, let us continue with the example mentioned in the overview section and assume that we have already computed the 2x2 contingency table: import numpy as np tb_b = np.array([[9945, 25], [15, 15]]) To test the null hypothesis that the predictive performances of the two models are equal (using a significance level of \\alpha=0.05 ), we can conduct a corrected McNemar Test for computing the chi-squared and p-value as follows: from mlxtend.evaluate import mcnemar chi2, p = mcnemar(ary=tb_b, corrected=True) print('chi-squared:', chi2) print('p-value:', p) chi-squared: 2.025 p-value: 0.154728923485 Since the p-value is larger than our assumed significance threshold ( \\alpha=0.05 ), we cannot reject our null hypothesis and assume that there is no significant difference between the two predictive models.","title":"Example 2 - McNemar's Test for Scenario B"},{"location":"user_guide/evaluate/mcnemar/#example-3-mcnemars-test-for-scenario-a","text":"In contrast to scenario B (Example 2), the sample size in scenario A is relatively small (b + c = 11 + 1 = 12), smaller than the recommended 25 [3] for the computed chi-squared value to be well-approximated by the chi-squared distribution. In this case, we need to compute the exact p-value from the binomial distribution: from mlxtend.evaluate import mcnemar import numpy as np tb_a = np.array([[9959, 11], [1, 29]]) chi2, p = mcnemar(ary=tb_a, exact=True) print('chi-squared:', chi2) print('p-value:', p) chi-squared: None p-value: 0.005859375 Assuming that we conducted this test also with a significance level of \\alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p \\approx 0.006 ) is smaller than \\alpha .","title":"Example 3 - McNemar's Test for Scenario A"},{"location":"user_guide/evaluate/mcnemar/#api","text":"mcnemar(ary, corrected=True, exact=False) McNemar test for paired nominal data Parameters ary : array-like, shape=[2, 2] 2 x 2 contingency table (as returned by evaluate.mcnemar_table), where a: ary[0, 0]: # of samples that both models predicted correctly b: ary[0, 1]: # of samples that model 1 got right and model 2 got wrong c: ary[1, 0]: # of samples that model 2 got right and model 1 got wrong d: ary[1, 1]: # of samples that both models predicted incorrectly corrected : bool (default: True) Uses Edwards' continuity correction for chi-squared if True exact : bool, (default: False) If True , uses an exact binomial test comparing b to a binomial distribution with n = b + c and p = 0.5. It is highly recommended to use exact=True for sample sizes < 25 since chi-squared is not well-approximated by the chi-squared distribution!
Returns chi2, p : float or None, float Returns the chi-squared value and the p-value; if exact=True (default: False ), chi2 is None Examples For usage examples, please see [http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/](http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/)","title":"API"},{"location":"user_guide/evaluate/mcnemar_table/","text":"Contingency Table for McNemar's Test Function to compute a 2x2 contingency table for McNemar's Test from mlxtend.evaluate import mcnemar_table Overview Contingency Table for McNemar's Test A 2x2 contingency table, as used in McNemar's Test ( mlxtend.evaluate.mcnemar ), is a useful aid for comparing two different models. In contrast to a typical confusion matrix, this table compares two models to each other rather than showing the false positives, true positives, false negatives, and true negatives of a single model's predictions: For instance, given that two models have accuracies of 99.7% and 99.6%, a 2x2 contingency table can provide further insights for model selection. In both subfigure A and B, the predictive accuracies of the two models are as follows: model 1 accuracy: 9,960 / 10,000 = 99.6% model 2 accuracy: 9,970 / 10,000 = 99.7% Now, in subfigure A, we can see that model 2 got 11 predictions right that model 1 got wrong. Vice versa, model 1 got 1 prediction right that model 2 got wrong. Thus, based on this 11:1 ratio, we may conclude that model 2 performs substantially better than model 1. However, in subfigure B, the ratio is 25:15, which is less conclusive about which model is the better one to choose. References McNemar, Quinn, 1947. \" Note on the sampling error of the difference between correlated proportions or percentages \". Psychometrika. 12 (2): 153\u2013157. Edwards AL: Note on the \u201ccorrection for continuity\u201d in testing the significance of the difference between correlated proportions. Psychometrika. 1948, 13 (3): 185-187. 10.1007/BF02289261. https://en.wikipedia.org/wiki/McNemar%27s_test Example 2 - 2x2 Contingency Table import numpy as np from mlxtend.evaluate import mcnemar_table y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod1 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) tb = mcnemar_table(y_target=y_true, y_model1=y_mod1, y_model2=y_mod2) tb array([[4, 1], [2, 3]]) To visualize (and better interpret) the contingency table via matplotlib, we can use the checkerboard_plot function: from mlxtend.plotting import checkerboard_plot import matplotlib.pyplot as plt brd = checkerboard_plot(tb, figsize=(3, 3), fmt='%d', col_labels=['model 2 wrong', 'model 2 right'], row_labels=['model 1 wrong', 'model 1 right']) plt.show() API mcnemar_table(y_target, y_model1, y_model2) Compute a 2x2 contingency table for McNemar's test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. y_model1 : array-like, shape=[n_samples] Predicted class labels from model 1 as 1D NumPy array. y_model2 : array-like, shape=[n_samples] Predicted class labels from model 2 as 1D NumPy array.
Returns tb : array-like, shape=[2, 2] 2x2 contingency table with the following contents: a: tb[0, 0]: # of samples that both models predicted correctly b: tb[0, 1]: # of samples that model 1 got right and model 2 got wrong c: tb[1, 0]: # of samples that model 2 got right and model 1 got wrong d: tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_table/","title":"Contingency Table for McNemar's Test"},{"location":"user_guide/evaluate/mcnemar_table/#contigency-table-for-mcnemars-test","text":"Function to compute a 2x2 contingency table for McNemar's Test from mlxtend.evaluate import mcnemar_table","title":"Contingency Table for McNemar's Test"},{"location":"user_guide/evaluate/mcnemar_table/#overview","text":"","title":"Overview"},{"location":"user_guide/evaluate/mcnemar_table/#contigency-table-for-mcnemars-test_1","text":"A 2x2 contingency table, as used in McNemar's Test ( mlxtend.evaluate.mcnemar ), is a useful aid for comparing two different models. In contrast to a typical confusion matrix, this table compares two models to each other rather than showing the false positives, true positives, false negatives, and true negatives of a single model's predictions: For instance, given that two models have accuracies of 99.7% and 99.6%, a 2x2 contingency table can provide further insights for model selection. In both subfigure A and B, the predictive accuracies of the two models are as follows: model 1 accuracy: 9,960 / 10,000 = 99.6% model 2 accuracy: 9,970 / 10,000 = 99.7% Now, in subfigure A, we can see that model 2 got 11 predictions right that model 1 got wrong. Vice versa, model 1 got 1 prediction right that model 2 got wrong. Thus, based on this 11:1 ratio, we may conclude that model 2 performs substantially better than model 1. However, in subfigure B, the ratio is 25:15, which is less conclusive about which model is the better one to choose.","title":"Contingency Table for McNemar's Test"},{"location":"user_guide/evaluate/mcnemar_table/#references","text":"McNemar, Quinn, 1947. \" Note on the sampling error of the difference between correlated proportions or percentages \". Psychometrika. 12 (2): 153\u2013157. Edwards AL: Note on the \u201ccorrection for continuity\u201d in testing the significance of the difference between correlated proportions. Psychometrika. 1948, 13 (3): 185-187. 10.1007/BF02289261. https://en.wikipedia.org/wiki/McNemar%27s_test","title":"References"},{"location":"user_guide/evaluate/mcnemar_table/#example-2-2x2-contigency-table","text":"import numpy as np from mlxtend.evaluate import mcnemar_table y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod1 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) tb = mcnemar_table(y_target=y_true, y_model1=y_mod1, y_model2=y_mod2) tb array([[4, 1], [2, 3]]) To visualize (and better interpret) the contingency table via matplotlib, we can use the checkerboard_plot function: from mlxtend.plotting import checkerboard_plot import matplotlib.pyplot as plt brd = checkerboard_plot(tb, figsize=(3, 3), fmt='%d', col_labels=['model 2 wrong', 'model 2 right'], row_labels=['model 1 wrong', 'model 1 right']) plt.show()","title":"Example 2 - 2x2 Contingency Table"},{"location":"user_guide/evaluate/mcnemar_table/#api","text":"mcnemar_table(y_target, y_model1, y_model2) Compute a 2x2 contingency table for McNemar's test.
Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. y_model1 : array-like, shape=[n_samples] Predicted class labels from model 1 as 1D NumPy array. y_model2 : array-like, shape=[n_samples] Predicted class labels from model 2 as 1D NumPy array. Returns tb : array-like, shape=[2, 2] 2x2 contingency table with the following contents: a: tb[0, 0]: # of samples that both models predicted correctly b: tb[0, 1]: # of samples that model 1 got right and model 2 got wrong c: tb[1, 0]: # of samples that model 2 got right and model 1 got wrong d: tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_table/","title":"API"},{"location":"user_guide/evaluate/mcnemar_tables/","text":"Contingency Tables for McNemar's Test and Cochran's Q Test Function to compute 2x2 contingency tables for McNemar's Test and Cochran's Q Test from mlxtend.evaluate import mcnemar_tables Overview Contingency Tables A 2x2 contingency table, as used in McNemar's Test ( mlxtend.evaluate.mcnemar ), is a useful aid for comparing two different models. In contrast to a typical confusion matrix, this table compares two models to each other rather than showing the false positives, true positives, false negatives, and true negatives of a single model's predictions: For instance, given that two models have accuracies of 99.7% and 99.6%, a 2x2 contingency table can provide further insights for model selection. In both subfigure A and B, the predictive accuracies of the two models are as follows: model 1 accuracy: 9,960 / 10,000 = 99.6% model 2 accuracy: 9,970 / 10,000 = 99.7% Now, in subfigure A, we can see that model 2 got 11 predictions right that model 1 got wrong. Vice versa, model 1 got 1 prediction right that model 2 got wrong. Thus, based on this 11:1 ratio, we may conclude that model 2 performs substantially better than model 1. However, in subfigure B, the ratio is 25:15, which is less conclusive about which model is the better one to choose. References McNemar, Quinn, 1947. \" Note on the sampling error of the difference between correlated proportions or percentages \". Psychometrika. 12 (2): 153\u2013157. Edwards AL: Note on the \u201ccorrection for continuity\u201d in testing the significance of the difference between correlated proportions. Psychometrika. 1948, 13 (3): 185-187. 10.1007/BF02289261.
https://en.wikipedia.org/wiki/McNemar%27s_test Example 1 - Single 2x2 Contingency Table import numpy as np from mlxtend.evaluate import mcnemar_tables y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) tb = mcnemar_tables(y_true, y_mod0, y_mod1) tb {'model_0 vs model_1': array([[ 4., 1.], [ 2., 3.]])} To visualize (and better interpret) the contingency table via matplotlib, we can use the checkerboard_plot function: from mlxtend.plotting import checkerboard_plot import matplotlib.pyplot as plt brd = checkerboard_plot(tb['model_0 vs model_1'], figsize=(3, 3), fmt='%d', col_labels=['model 2 wrong', 'model 2 right'], row_labels=['model 1 wrong', 'model 1 right']) plt.show() Example 2 - Multiple 2x2 Contingency Tables If more than two models are provided as input to the mcnemar_tables function, a 2x2 contingency table will be created for each pair of models: import numpy as np from mlxtend.evaluate import mcnemar_tables y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 1, 0]) tb = mcnemar_tables(y_true, y_mod0, y_mod1, y_mod2) for key, value in tb.items(): print(key, '\\n', value, '\\n') model_0 vs model_1 [[ 4. 1.] [ 2. 3.]] model_0 vs model_2 [[ 4. 2.] [ 2. 2.]] model_1 vs model_2 [[ 5. 1.] [ 0. 4.]] API mcnemar_tables(y_target, *y_model_predictions) Compute multiple 2x2 contingency tables for McNemar's test or Cochran's Q test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. y_model_predictions : array-like, shape=[n_samples] Predicted class labels for a model. Returns tables : dict Dictionary of NumPy arrays with shape=[2, 2]. Each dictionary key names the two models to be compared based on the order the models were passed as *y_model_predictions .
The number of dictionary entries is equal to the number of pairwise combinations between the m models, i.e., \"m choose 2.\" For example, the following target array (containing the true labels) and 3 models y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 1, 1, 1, 0, 1, 0, 0, 0, 0]) would result in the following dictionary: {'model_0 vs model_1': array([[ 4., 1.], [ 2., 3.]]), 'model_0 vs model_2': array([[ 3., 0.], [ 3., 4.]]), 'model_1 vs model_2': array([[ 3., 0.], [ 2., 5.]])} Each array is structured in the following way: tb[0, 0]: # of samples that both models predicted correctly tb[0, 1]: # of samples that model a got right and model b got wrong tb[1, 0]: # of samples that model b got right and model a got wrong tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see [http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/](http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/)","title":"Contingency Tables for McNemar's Test and Cochran's Q Test"},{"location":"user_guide/evaluate/mcnemar_tables/#contigency-tables-for-mcnemars-test-and-cochrans-q-test","text":"Function to compute 2x2 contingency tables for McNemar's Test and Cochran's Q Test from mlxtend.evaluate import mcnemar_tables","title":"Contingency Tables for McNemar's Test and Cochran's Q Test"},{"location":"user_guide/evaluate/mcnemar_tables/#overview","text":"","title":"Overview"},{"location":"user_guide/evaluate/mcnemar_tables/#contigency-tables","text":"A 2x2 contingency table, as used in McNemar's Test ( mlxtend.evaluate.mcnemar ), is a useful aid for comparing two different models. In contrast to a typical confusion matrix, this table compares two models to each other rather than showing the false positives, true positives, false negatives, and true negatives of a single model's predictions: For instance, given that two models have accuracies of 99.7% and 99.6%, a 2x2 contingency table can provide further insights for model selection. In both subfigure A and B, the predictive accuracies of the two models are as follows: model 1 accuracy: 9,960 / 10,000 = 99.6% model 2 accuracy: 9,970 / 10,000 = 99.7% Now, in subfigure A, we can see that model 2 got 11 predictions right that model 1 got wrong. Vice versa, model 1 got 1 prediction right that model 2 got wrong. Thus, based on this 11:1 ratio, we may conclude that model 2 performs substantially better than model 1. However, in subfigure B, the ratio is 25:15, which is less conclusive about which model is the better one to choose.","title":"Contingency Tables"},{"location":"user_guide/evaluate/mcnemar_tables/#references","text":"McNemar, Quinn, 1947. \" Note on the sampling error of the difference between correlated proportions or percentages \". Psychometrika. 12 (2): 153\u2013157. Edwards AL: Note on the \u201ccorrection for continuity\u201d in testing the significance of the difference between correlated proportions. Psychometrika. 1948, 13 (3): 185-187. 10.1007/BF02289261.
https://en.wikipedia.org/wiki/McNemar%27s_test","title":"References"},{"location":"user_guide/evaluate/mcnemar_tables/#example-1-single-2x2-contigency-table","text":"import numpy as np from mlxtend.evaluate import mcnemar_tables y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) tb = mcnemar_tables(y_true, y_mod0, y_mod1) tb {'model_0 vs model_1': array([[ 4., 1.], [ 2., 3.]])} To visualize (and better interpret) the contingency table via matplotlib, we can use the checkerboard_plot function: from mlxtend.plotting import checkerboard_plot import matplotlib.pyplot as plt brd = checkerboard_plot(tb['model_0 vs model_1'], figsize=(3, 3), fmt='%d', col_labels=['model 2 wrong', 'model 2 right'], row_labels=['model 1 wrong', 'model 1 right']) plt.show()","title":"Example 1 - Single 2x2 Contingency Table"},{"location":"user_guide/evaluate/mcnemar_tables/#example-2-multiple-2x2-contigency-tables","text":"If more than two models are provided as input to the mcnemar_tables function, a 2x2 contingency table will be created for each pair of models: import numpy as np from mlxtend.evaluate import mcnemar_tables y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 1, 0]) tb = mcnemar_tables(y_true, y_mod0, y_mod1, y_mod2) for key, value in tb.items(): print(key, '\\n', value, '\\n') model_0 vs model_1 [[ 4. 1.] [ 2. 3.]] model_0 vs model_2 [[ 4. 2.] [ 2. 2.]] model_1 vs model_2 [[ 5. 1.] [ 0. 4.]]","title":"Example 2 - Multiple 2x2 Contingency Tables"},{"location":"user_guide/evaluate/mcnemar_tables/#api","text":"mcnemar_tables(y_target, *y_model_predictions) Compute multiple 2x2 contingency tables for McNemar's test or Cochran's Q test. Parameters y_target : array-like, shape=[n_samples] True class labels as 1D NumPy array. y_model_predictions : array-like, shape=[n_samples] Predicted class labels for a model. Returns tables : dict Dictionary of NumPy arrays with shape=[2, 2]. Each dictionary key names the two models to be compared based on the order the models were passed as *y_model_predictions .
The number of dictionary entries is equal to the number of pairwise combinations between the m models, i.e., \"m choose 2.\" For example, the following target array (containing the true labels) and 3 models y_true = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) y_mod0 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0]) y_mod1 = np.array([0, 0, 1, 1, 0, 1, 1, 0, 0, 0]) y_mod2 = np.array([0, 1, 1, 1, 0, 1, 0, 0, 0, 0]) would result in the following dictionary: {'model_0 vs model_1': array([[ 4., 1.], [ 2., 3.]]), 'model_0 vs model_2': array([[ 3., 0.], [ 3., 4.]]), 'model_1 vs model_2': array([[ 3., 0.], [ 2., 5.]])} Each array is structured in the following way: tb[0, 0]: # of samples that both models predicted correctly tb[0, 1]: # of samples that model a got right and model b got wrong tb[1, 0]: # of samples that model b got right and model a got wrong tb[1, 1]: # of samples that both models predicted incorrectly Examples For usage examples, please see [http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/](http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/)","title":"API"},{"location":"user_guide/evaluate/paired_ttest_5x2cv/","text":"5x2cv paired t test 5x2cv paired t test procedure to compare the performance of two models from mlxtend.evaluate import paired_ttest_5x2cv Overview The 5x2cv paired t test is a procedure for comparing the performance of two models (classifiers or regressors) that was proposed by Dietterich [1] to address shortcomings in other methods such as the resampled paired t test (see paired_ttest_resampled ) and the k-fold cross-validated paired t test (see paired_ttest_kfold_cv ). To explain how this method works, let's consider two estimators (e.g., classifiers), A and B. Further, we have a labeled dataset D . In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the 5x2cv paired t test, we repeat the splitting (50% training and 50% test data) 5 times. In each of the 5 iterations, we fit A and B to the training split and evaluate their performance ( p_A and p_B ) on the test split. Then, we rotate the training and test sets (the training set becomes the test set and vice versa) and compute the performance again, which results in 2 performance difference measures: p^{(1)} = p^{(1)}_A - p^{(1)}_B and p^{(2)} = p^{(2)}_A - p^{(2)}_B. Then, we estimate the mean and variance of the differences: \\overline{p} = \\frac{p^{(1)} + p^{(2)}}{2} and s^2 = (p^{(1)} - \\overline{p})^2 + (p^{(2)} - \\overline{p})^2. The variance of the difference is computed for the 5 iterations and then used to compute the t statistic as follows: t = \\frac{p_1^{(1)}}{\\sqrt{(1/5) \\sum_{i=1}^{5}s_i^2}}, where p_1^{(1)} is the performance difference p^{(1)} from the very first iteration. Under the null hypothesis that the models A and B have equal performance, this t statistic approximately follows a t distribution with 5 degrees of freedom. Using the t statistic, the p value can be computed and compared with a previously chosen significance level, e.g., \\alpha=0.05 . If the p value is smaller than \\alpha , we reject the null hypothesis and accept that there is a significant difference in the two models. References [1] Dietterich TG (1998) Approximate Statistical Tests for Comparing Supervised Classification Learning Algorithms. Neural Comput 10:1895\u20131923.
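To make the statistic above concrete, here is a minimal numeric sketch (not the library implementation); the performance differences in diffs are hypothetical placeholders, with one row per iteration and one column per fold:

import numpy as np
from scipy import stats
# hypothetical difference measures p^(1), p^(2) for each of the 5 iterations
diffs = np.array([[0.02, 0.01],
                  [0.03, 0.00],
                  [0.01, 0.02],
                  [0.00, 0.01],
                  [0.02, 0.02]])
p_bar = diffs.mean(axis=1)                      # mean difference per iteration
s2 = ((diffs - p_bar[:, None])**2).sum(axis=1)  # variance estimate per iteration
t = diffs[0, 0] / np.sqrt(s2.mean())            # p_1^(1) / sqrt((1/5) * sum of s_i^2)
p_value = 2 * stats.t.sf(np.abs(t), df=5)       # two-sided, 5 degrees of freedom
print(t, p_value)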
Example 1 - 5x2cv paired t test Assume we want to compare two classification algorithms, logistic regression and a decision tree algorithm: from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from mlxtend.data import iris_data from sklearn.model_selection import train_test_split X, y = iris_data() clf1 = LogisticRegression(random_state=1) clf2 = DecisionTreeClassifier(random_state=1) X_train, X_test, y_train, y_test = \\ train_test_split(X, y, test_size=0.25, random_state=123) score1 = clf1.fit(X_train, y_train).score(X_test, y_test) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Logistic regression accuracy: %.2f%%' % (score1*100)) print('Decision tree accuracy: %.2f%%' % (score2*100)) Logistic regression accuracy: 97.37% Decision tree accuracy: 94.74% Note that these accuracy values are not used in the paired t test procedure, as new test/train splits are generated during the resampling procedure; the values above merely serve to build intuition. Now, let's assume a significance threshold of \\alpha=0.05 for rejecting the null hypothesis that both algorithms perform equally well on the dataset and conduct the 5x2cv t test: from mlxtend.evaluate import paired_ttest_5x2cv t, p = paired_ttest_5x2cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) t statistic: -1.539 p value: 0.184 Since p > \\alpha , we cannot reject the null hypothesis and may conclude that the performance of the two algorithms is not significantly different. While it is generally not recommended to apply statistical tests multiple times without correction for multiple hypothesis testing, let us take a look at an example where the decision tree algorithm is limited to producing a very simple decision boundary that would result in a relatively bad performance: clf2 = DecisionTreeClassifier(random_state=1, max_depth=1) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Decision tree accuracy: %.2f%%' % (score2*100)) t, p = paired_ttest_5x2cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) Decision tree accuracy: 63.16% t statistic: 5.386 p value: 0.003 Assuming that we conducted this test also with a significance level of \\alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p \\approx 0.003 ) is smaller than \\alpha . API paired_ttest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv paired t test proposed by Dietterich (1998) to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors.
If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_5x2cv/","title":"5x2cv paired *t* test"},{"location":"user_guide/evaluate/paired_ttest_5x2cv/#5x2cv-paired-t-test","text":"5x2cv paired t test procedure to compare the performance of two models from mlxtend.evaluate import paired_ttest_5x2cv","title":"5x2cv paired t test"},{"location":"user_guide/evaluate/paired_ttest_5x2cv/#overview","text":"The 5x2cv paired t test is a procedure for comparing the performance of two models (classifiers or regressors) that was proposed by Dietterich [1] to address shortcomings in other methods such as the resampled paired t test (see paired_ttest_resampled ) and the k-fold cross-validated paired t test (see paired_ttest_kfold_cv ). To explain how this method works, let's consider two estimators (e.g., classifiers), A and B. Further, we have a labeled dataset D . In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the 5x2cv paired t test, we repeat the splitting (50% training and 50% test data) 5 times. In each of the 5 iterations, we fit A and B to the training split and evaluate their performance ( p_A and p_B ) on the test split. Then, we rotate the training and test sets (the training set becomes the test set and vice versa) and compute the performance again, which results in 2 performance difference measures: p^{(1)} = p^{(1)}_A - p^{(1)}_B and p^{(2)} = p^{(2)}_A - p^{(2)}_B. Then, we estimate the mean and variance of the differences: \\overline{p} = \\frac{p^{(1)} + p^{(2)}}{2} and s^2 = (p^{(1)} - \\overline{p})^2 + (p^{(2)} - \\overline{p})^2. The variance of the difference is computed for the 5 iterations and then used to compute the t statistic as follows: t = \\frac{p_1^{(1)}}{\\sqrt{(1/5) \\sum_{i=1}^{5}s_i^2}}, where p_1^{(1)} is the performance difference p^{(1)} from the very first iteration. Under the null hypothesis that the models A and B have equal performance, this t statistic approximately follows a t distribution with 5 degrees of freedom. Using the t statistic, the p value can be computed and compared with a previously chosen significance level, e.g., \\alpha=0.05 . If the p value is smaller than \\alpha , we reject the null hypothesis and accept that there is a significant difference in the two models.","title":"Overview"},{"location":"user_guide/evaluate/paired_ttest_5x2cv/#references","text":"[1] Dietterich TG (1998) Approximate Statistical Tests for Comparing Supervised Classification Learning Algorithms.
Neural Comput 10:1895\u20131923.","title":"References"},{"location":"user_guide/evaluate/paired_ttest_5x2cv/#example-1-5x2cv-paired-t-test","text":"Assume we want to compare two classification algorithms, logistic regression and a decision tree algorithm: from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from mlxtend.data import iris_data from sklearn.model_selection import train_test_split X, y = iris_data() clf1 = LogisticRegression(random_state=1) clf2 = DecisionTreeClassifier(random_state=1) X_train, X_test, y_train, y_test = \\ train_test_split(X, y, test_size=0.25, random_state=123) score1 = clf1.fit(X_train, y_train).score(X_test, y_test) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Logistic regression accuracy: %.2f%%' % (score1*100)) print('Decision tree accuracy: %.2f%%' % (score2*100)) Logistic regression accuracy: 97.37% Decision tree accuracy: 94.74% Note that these accuracy values are not used in the paired t test procedure, as new test/train splits are generated during the resampling procedure; the values above merely serve to build intuition. Now, let's assume a significance threshold of \\alpha=0.05 for rejecting the null hypothesis that both algorithms perform equally well on the dataset and conduct the 5x2cv t test: from mlxtend.evaluate import paired_ttest_5x2cv t, p = paired_ttest_5x2cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) t statistic: -1.539 p value: 0.184 Since p > \\alpha , we cannot reject the null hypothesis and may conclude that the performance of the two algorithms is not significantly different. While it is generally not recommended to apply statistical tests multiple times without correction for multiple hypothesis testing, let us take a look at an example where the decision tree algorithm is limited to producing a very simple decision boundary that would result in a relatively bad performance: clf2 = DecisionTreeClassifier(random_state=1, max_depth=1) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Decision tree accuracy: %.2f%%' % (score2*100)) t, p = paired_ttest_5x2cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) Decision tree accuracy: 63.16% t statistic: 5.386 p value: 0.003 Assuming that we conducted this test also with a significance level of \\alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p \\approx 0.003 ) is smaller than \\alpha .","title":"Example 1 - 5x2cv paired t test"},{"location":"user_guide/evaluate/paired_ttest_5x2cv/#api","text":"paired_ttest_5x2cv(estimator1, estimator2, X, y, scoring=None, random_seed=None) Implements the 5x2cv paired t test proposed by Dietterich (1998) to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors.
If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_5x2cv/","title":"API"},{"location":"user_guide/evaluate/paired_ttest_kfold_cv/","text":"K-fold cross-validated paired t test K-fold paired t test procedure to compare the performance of two models from mlxtend.evaluate import paired_ttest_kfold_cv Overview The k-fold cross-validated paired t-test procedure is a common method for comparing the performance of two models (classifiers or regressors) and addresses some of the drawbacks of the resampled t-test procedure ; however, this method still has the problem that the training sets overlap, and it is not recommended for use in practice [1]; techniques such as the paired_ttest_5x2cv should be used instead. To explain how this method works, let's consider two estimators (e.g., classifiers), A and B. Further, we have a labeled dataset D . In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the k-fold cross-validated paired t-test procedure, we split the dataset into k parts of equal size, and each of these parts is then used for testing while the remaining k-1 parts (joined together) are used for training a classifier or regressor (i.e., the standard k-fold cross-validation procedure). In each k-fold cross-validation iteration, we then compute the difference in performance between A and B so that we obtain k difference measures. Now, by making the assumption that these k differences were independently drawn and follow an approximately normal distribution, we can compute the following t statistic with k-1 degrees of freedom according to Student's t test, under the null hypothesis that the models A and B have equal performance: t = \\frac{\\overline{p} \\sqrt{k}}{\\sqrt{\\sum_{i=1}^{k}(p^{(i)} - \\overline{p})^2 / (k-1)}}. Here, p^{(i)} is the difference between the model performances in the i th iteration, p^{(i)} = p^{(i)}_A - p^{(i)}_B , and \\overline{p} represents the average difference between the classifier performances, \\overline{p} = \\frac{1}{k} \\sum^k_{i=1} p^{(i)} . Once we have computed the t statistic, we can compute the p value and compare it to our chosen significance level, e.g., \\alpha=0.05 . If the p value is smaller than \\alpha , we reject the null hypothesis and accept that there is a significant difference in the two models.
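For concreteness, a minimal sketch (not the library implementation) of computing this statistic from k hypothetical per-fold performance differences (placeholder values, not real results):

import numpy as np
from scipy import stats
# hypothetical per-fold performance differences p^(i) for k=10 folds
diffs = np.array([0.02, -0.01, 0.03, 0.00, 0.01,
                  0.02, 0.00, -0.02, 0.01, 0.02])
k = len(diffs)
p_bar = diffs.mean()
t = p_bar * np.sqrt(k) / np.sqrt(np.sum((diffs - p_bar)**2) / (k - 1))
p_value = 2 * stats.t.sf(np.abs(t), df=k - 1)  # two-sided, k-1 degrees of freedom
print(t, p_value)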
The problem with this method, and the reason why it is not recommended to be used in practice, is that it violates an assumption of Student's t test [1]: the differences between the model performances ( p^{(i)} = p^{(i)}_A - p^{(i)}_B ) are not normally distributed because p^{(i)}_A and p^{(i)}_B are not independent; the p^{(i)} 's themselves are not independent because the training sets overlap References [1] Dietterich TG (1998) Approximate Statistical Tests for Comparing Supervised Classification Learning Algorithms. Neural Comput 10:1895\u20131923. Example 1 - K-fold cross-validated paired t test Assume we want to compare two classification algorithms, logistic regression and a decision tree algorithm: from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from mlxtend.data import iris_data from sklearn.model_selection import train_test_split X, y = iris_data() clf1 = LogisticRegression(random_state=1) clf2 = DecisionTreeClassifier(random_state=1) X_train, X_test, y_train, y_test = \\ train_test_split(X, y, test_size=0.25, random_state=123) score1 = clf1.fit(X_train, y_train).score(X_test, y_test) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Logistic regression accuracy: %.2f%%' % (score1*100)) print('Decision tree accuracy: %.2f%%' % (score2*100)) Logistic regression accuracy: 97.37% Decision tree accuracy: 94.74% Note that these accuracy values are not used in the paired t-test procedure, as new test/train splits are generated during the resampling procedure; the values above merely serve to build intuition. Now, let's assume a significance threshold of \\alpha=0.05 for rejecting the null hypothesis that both algorithms perform equally well on the dataset and conduct the k-fold cross-validated t-test: from mlxtend.evaluate import paired_ttest_kfold_cv t, p = paired_ttest_kfold_cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) t statistic: -1.861 p value: 0.096 Since p > \\alpha , we cannot reject the null hypothesis and may conclude that the performance of the two algorithms is not significantly different. While it is generally not recommended to apply statistical tests multiple times without correction for multiple hypothesis testing, let us take a look at an example where the decision tree algorithm is limited to producing a very simple decision boundary that would result in a relatively bad performance: clf2 = DecisionTreeClassifier(random_state=1, max_depth=1) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Decision tree accuracy: %.2f%%' % (score2*100)) t, p = paired_ttest_kfold_cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) Decision tree accuracy: 63.16% t statistic: 13.491 p value: 0.000 Assuming that we conducted this test also with a significance level of \\alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p < 0.001 ) is smaller than \\alpha . API paired_ttest_kfold_cv(estimator1, estimator2, X, y, cv=10, scoring=None, shuffle=False, random_seed=None) Implements the k-fold paired t test procedure to compare the performance of two models.
Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. cv : int (default: 10) Number of splits (iterations) for the cross-validation procedure scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. shuffle : bool (default: False) Whether to shuffle the dataset for generating the k-fold splits. random_seed : int or None (default: None) Random seed for shuffling the dataset for generating the k-fold splits. Ignored if shuffle=False. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_kfold_cv/","title":"K-fold cross-validated paired *t* test"},{"location":"user_guide/evaluate/paired_ttest_kfold_cv/#k-fold-cross-validated-paired-t-test","text":"K-fold paired t test procedure to compare the performance of two models from mlxtend.evaluate import paired_ttest_kfold_cv","title":"K-fold cross-validated paired t test"},{"location":"user_guide/evaluate/paired_ttest_kfold_cv/#overview","text":"The k-fold cross-validated paired t-test procedure is a common method for comparing the performance of two models (classifiers or regressors) and addresses some of the drawbacks of the resampled t-test procedure ; however, this method still has the problem that the training sets overlap, and it is not recommended for use in practice [1]; techniques such as the paired_ttest_5x2cv should be used instead. To explain how this method works, let's consider two estimators (e.g., classifiers), A and B. Further, we have a labeled dataset D . In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the k-fold cross-validated paired t-test procedure, we split the dataset into k parts of equal size, and each of these parts is then used for testing while the remaining k-1 parts (joined together) are used for training a classifier or regressor (i.e., the standard k-fold cross-validation procedure). In each k-fold cross-validation iteration, we then compute the difference in performance between A and B so that we obtain k difference measures. Now, by making the assumption that these k differences were independently drawn and follow an approximately normal distribution, we can compute the following t statistic with k-1 degrees of freedom according to Student's t test, under the null hypothesis that the models A and B have equal performance: t = \\frac{\\overline{p} \\sqrt{k}}{\\sqrt{\\sum_{i=1}^{k}(p^{(i)} - \\overline{p})^2 / (k-1)}}.
Here, p^{(i)} is the difference between the model performances in the i th iteration, p^{(i)} = p^{(i)}_A - p^{(i)}_B , and \\overline{p} represents the average difference between the classifier performances, \\overline{p} = \\frac{1}{k} \\sum^k_{i=1} p^{(i)} . Once we have computed the t statistic, we can compute the p value and compare it to our chosen significance level, e.g., \\alpha=0.05 . If the p value is smaller than \\alpha , we reject the null hypothesis and accept that there is a significant difference in the two models. The problem with this method, and the reason why it is not recommended to be used in practice, is that it violates an assumption of Student's t test [1]: the differences between the model performances ( p^{(i)} = p^{(i)}_A - p^{(i)}_B ) are not normally distributed because p^{(i)}_A and p^{(i)}_B are not independent; the p^{(i)} 's themselves are not independent because the training sets overlap","title":"Overview"},{"location":"user_guide/evaluate/paired_ttest_kfold_cv/#references","text":"[1] Dietterich TG (1998) Approximate Statistical Tests for Comparing Supervised Classification Learning Algorithms. Neural Comput 10:1895\u20131923.","title":"References"},{"location":"user_guide/evaluate/paired_ttest_kfold_cv/#example-1-k-fold-cross-validated-paired-t-test","text":"Assume we want to compare two classification algorithms, logistic regression and a decision tree algorithm: from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from mlxtend.data import iris_data from sklearn.model_selection import train_test_split X, y = iris_data() clf1 = LogisticRegression(random_state=1) clf2 = DecisionTreeClassifier(random_state=1) X_train, X_test, y_train, y_test = \\ train_test_split(X, y, test_size=0.25, random_state=123) score1 = clf1.fit(X_train, y_train).score(X_test, y_test) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Logistic regression accuracy: %.2f%%' % (score1*100)) print('Decision tree accuracy: %.2f%%' % (score2*100)) Logistic regression accuracy: 97.37% Decision tree accuracy: 94.74% Note that these accuracy values are not used in the paired t-test procedure, as new test/train splits are generated during the resampling procedure; the values above merely serve to build intuition. Now, let's assume a significance threshold of \\alpha=0.05 for rejecting the null hypothesis that both algorithms perform equally well on the dataset and conduct the k-fold cross-validated t-test: from mlxtend.evaluate import paired_ttest_kfold_cv t, p = paired_ttest_kfold_cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) t statistic: -1.861 p value: 0.096 Since p > \\alpha , we cannot reject the null hypothesis and may conclude that the performance of the two algorithms is not significantly different.
While it is generally not recommended to apply statistical tests multiple times without correction for multiple hypothesis testing, let us take a look at an example where the decision tree algorithm is limited to producing a very simple decision boundary that would result in a relatively bad performance: clf2 = DecisionTreeClassifier(random_state=1, max_depth=1) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Decision tree accuracy: %.2f%%' % (score2*100)) t, p = paired_ttest_kfold_cv(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) Decision tree accuracy: 63.16% t statistic: 13.491 p value: 0.000 Assuming that we conducted this test also with a significance level of \\alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p < 0.001 ) is smaller than \\alpha .","title":"Example 1 - K-fold cross-validated paired t test"},{"location":"user_guide/evaluate/paired_ttest_kfold_cv/#api","text":"paired_ttest_kfold_cv(estimator1, estimator2, X, y, cv=10, scoring=None, shuffle=False, random_seed=None) Implements the k-fold paired t test procedure to compare the performance of two models. Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. cv : int (default: 10) Number of splits (iterations) for the cross-validation procedure scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. shuffle : bool (default: False) Whether to shuffle the dataset for generating the k-fold splits. random_seed : int or None (default: None) Random seed for shuffling the dataset for generating the k-fold splits. Ignored if shuffle=False. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences in the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_kfold_cv/","title":"API"},{"location":"user_guide/evaluate/paired_ttest_resampled/","text":"Resampled paired t test Resampled paired t test procedure to compare the performance of two models from mlxtend.evaluate import paired_ttest_resampled Overview The resampled paired t test procedure (also called k-hold-out paired t test) is a popular method for comparing the performance of two models (classifiers or regressors); however, this method has many drawbacks and is not recommended for use in practice [1]; techniques such as the paired_ttest_5x2cv should be used instead. To explain how this method works, let's consider two estimators (e.g., classifiers), A and B.
Further, we have a labeled dataset D . In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the resampled paired t test procedure, we repeat this splitting procedure (with typically 2/3 training data and 1/3 test data) k times (usually 30). In each iteration, we train A and B on the training set and evaluate them on the test set. Then, we compute the difference in performance between A and B in each iteration so that we obtain k difference measures. Now, by making the assumption that these k differences were independently drawn and follow an approximately normal distribution, we can compute the following t statistic with k-1 degrees of freedom according to Student's t test, under the null hypothesis that the models A and B have equal performance: t = \\frac{\\overline{p} \\sqrt{k}}{\\sqrt{\\sum_{i=1}^{k}(p^{(i)} - \\overline{p})^2 / (k-1)}}. Here, p^{(i)} denotes the difference between the model performances in the i th iteration, p^{(i)} = p^{(i)}_A - p^{(i)}_B , and \\overline{p} represents the average difference between the classifier performances, \\overline{p} = \\frac{1}{k} \\sum^k_{i=1} p^{(i)} . Once we have computed the t statistic, we can compute the p value and compare it to our chosen significance level, e.g., \\alpha=0.05 . If the p value is smaller than \\alpha , we reject the null hypothesis and accept that there is a significant difference between the two models. To summarize the procedure: i := 0 while i < k: split dataset into training and test subsets fit models A and B to the training set compute the performances of A and B on the test set record the performance difference between A and B i := i + 1 compute t-statistic compute p value from t-statistic with k-1 degrees of freedom compare p value to chosen significance threshold The problem with this method, and the reason why it is not recommended for use in practice, is that it violates the assumptions of Student's t test [1]: the differences between the model performances ( p^{(i)} = p^{(i)}_A - p^{(i)}_B ) are not normally distributed because p^{(i)}_A and p^{(i)}_B are not independent; the p^{(i)} 's themselves are not independent because of the overlapping test sets; also, test and training sets overlap as well References [1] Dietterich TG (1998) Approximate Statistical Tests for Comparing Supervised Classification Learning Algorithms. Neural Comput 10:1895\u20131923. Example 1 - Resampled paired t test Assume we want to compare two classification algorithms, logistic regression and a decision tree algorithm: from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from mlxtend.data import iris_data from sklearn.model_selection import train_test_split X, y = iris_data() clf1 = LogisticRegression(random_state=1) clf2 = DecisionTreeClassifier(random_state=1) X_train, X_test, y_train, y_test = \\ train_test_split(X, y, test_size=0.25, random_state=123) score1 = clf1.fit(X_train, y_train).score(X_test, y_test) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Logistic regression accuracy: %.2f%%' % (score1*100)) print('Decision tree accuracy: %.2f%%' % (score2*100)) Logistic regression accuracy: 97.37% Decision tree accuracy: 94.74% Note that these accuracy values are not used in the paired t test procedure, as new train/test splits are generated during the resampling procedure; the values above merely serve to build intuition.
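Since the pseudocode summary above maps almost line-by-line to Python, here is a minimal sketch of the procedure, assuming k random train/test splits drawn via scikit-learn's train_test_split; the function name is hypothetical and this is not mlxtend's implementation:

```python
# Minimal sketch of the resampled paired t test (k random train/test
# splits, typically k=30); names are illustrative, not mlxtend's API.
import numpy as np
from scipy import stats
from sklearn.model_selection import train_test_split

def resampled_paired_ttest_sketch(clf_a, clf_b, X, y, k=30,
                                  test_size=1/3, seed=1):
    rng = np.random.RandomState(seed)
    diffs = []
    for _ in range(k):
        X_tr, X_te, y_tr, y_te = train_test_split(
            X, y, test_size=test_size, random_state=rng.randint(100000))
        # record the per-split performance difference p^(i)
        diffs.append(clf_a.fit(X_tr, y_tr).score(X_te, y_te)
                     - clf_b.fit(X_tr, y_tr).score(X_te, y_te))
    diffs = np.array(diffs)
    p_bar = diffs.mean()
    t_stat = p_bar * np.sqrt(k) / np.sqrt(((diffs - p_bar)**2).sum() / (k - 1))
    return t_stat, 2.0 * stats.t.sf(np.abs(t_stat), df=k - 1)  # df = k - 1
```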
Now, let's assume a significance threshold of \\alpha=0.05 for rejecting the null hypothesis that both algorithms perform equally well on the dataset and conduct the resampled paired t test: from mlxtend.evaluate import paired_ttest_resampled t, p = paired_ttest_resampled(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) t statistic: -1.809 p value: 0.081 Since p > \\alpha , we cannot reject the null hypothesis and may conclude that the performance of the two algorithms is not significantly different. While it is generally not recommended to apply statistical tests multiple times without correction for multiple hypothesis testing, let us take a look at an example where the decision tree algorithm is limited to producing a very simple decision boundary that would result in a relatively bad performance: clf2 = DecisionTreeClassifier(random_state=1, max_depth=1) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Decision tree accuracy: %.2f%%' % (score2*100)) t, p = paired_ttest_resampled(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) Decision tree accuracy: 63.16% t statistic: 39.214 p value: 0.000 Assuming that we also conducted this test with a significance level of \\alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p < 0.001 ) is smaller than \\alpha . API paired_ttest_resampled(estimator1, estimator2, X, y, num_rounds=30, test_size=0.3, scoring=None, random_seed=None) Implements the resampled paired t test procedure to compare the performance of two models (also called k-hold-out paired t test). Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. num_rounds : int (default: 30) Number of resampling iterations (i.e., train/test splits) test_size : float or int (default: 0.3) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to use as a test set. If int, represents the absolute number of test examples. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences between the two compared models.
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_resampled/","title":"Resampled paired *t* test"},{"location":"user_guide/evaluate/paired_ttest_resampled/#resampled-paired-t-test","text":"Resampled paired t test procedure to compare the performance of two models from mlxtend.evaluate import paired_ttest_resampled","title":"Resampled paired t test"},{"location":"user_guide/evaluate/paired_ttest_resampled/#overview","text":"Resampled paired t test procedure (also called k-hold-out paired t test) is a popular method for comparing the performance of two models (classifiers or regressors); however, this method has many drawbacks and is not recommended for use in practice [1], and techniques such as the paired_ttest_5x2cv should be used instead. To explain how this method works, let's consider two estimators (e.g., classifiers) A and B. Further, we have a labeled dataset D . In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the resampled paired t test procedure, we repeat this splitting procedure (with typically 2/3 training data and 1/3 test data) k times (usually 30). In each iteration, we train A and B on the training set and evaluate them on the test set. Then, we compute the difference in performance between A and B in each iteration so that we obtain k difference measures. Now, by making the assumption that these k differences were independently drawn and follow an approximately normal distribution, we can compute the following t statistic with k-1 degrees of freedom according to Student's t test, under the null hypothesis that the models A and B have equal performance: t = \\frac{\\overline{p} \\sqrt{k}}{\\sqrt{\\sum_{i=1}^{k}(p^{(i)} - \\overline{p})^2 / (k-1)}}. Here, p^{(i)} denotes the difference between the model performances in the i th iteration, p^{(i)} = p^{(i)}_A - p^{(i)}_B , and \\overline{p} represents the average difference between the classifier performances, \\overline{p} = \\frac{1}{k} \\sum^k_{i=1} p^{(i)} . Once we have computed the t statistic, we can compute the p value and compare it to our chosen significance level, e.g., \\alpha=0.05 . If the p value is smaller than \\alpha , we reject the null hypothesis and accept that there is a significant difference between the two models. To summarize the procedure: i := 0 while i < k: split dataset into training and test subsets fit models A and B to the training set compute the performances of A and B on the test set record the performance difference between A and B i := i + 1 compute t-statistic compute p value from t-statistic with k-1 degrees of freedom compare p value to chosen significance threshold The problem with this method, and the reason why it is not recommended for use in practice, is that it violates the assumptions of Student's t test [1]: the differences between the model performances ( p^{(i)} = p^{(i)}_A - p^{(i)}_B ) are not normally distributed because p^{(i)}_A and p^{(i)}_B are not independent; the p^{(i)} 's themselves are not independent because of the overlapping test sets; also, test and training sets overlap as well","title":"Overview"},{"location":"user_guide/evaluate/paired_ttest_resampled/#references","text":"[1] Dietterich TG (1998) Approximate Statistical Tests for Comparing Supervised Classification Learning Algorithms.
Neural Comput 10:1895\u20131923.","title":"References"},{"location":"user_guide/evaluate/paired_ttest_resampled/#example-1-resampled-paired-t-test","text":"Assume we want to compare two classification algorithms, logistic regression and a decision tree algorithm: from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from mlxtend.data import iris_data from sklearn.model_selection import train_test_split X, y = iris_data() clf1 = LogisticRegression(random_state=1) clf2 = DecisionTreeClassifier(random_state=1) X_train, X_test, y_train, y_test = \\ train_test_split(X, y, test_size=0.25, random_state=123) score1 = clf1.fit(X_train, y_train).score(X_test, y_test) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Logistic regression accuracy: %.2f%%' % (score1*100)) print('Decision tree accuracy: %.2f%%' % (score2*100)) Logistic regression accuracy: 97.37% Decision tree accuracy: 94.74% Note that these accuracy values are not used in the paired t test procedure, as new train/test splits are generated during the resampling procedure; the values above merely serve to build intuition. Now, let's assume a significance threshold of \\alpha=0.05 for rejecting the null hypothesis that both algorithms perform equally well on the dataset and conduct the resampled paired t test: from mlxtend.evaluate import paired_ttest_resampled t, p = paired_ttest_resampled(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) t statistic: -1.809 p value: 0.081 Since p > \\alpha , we cannot reject the null hypothesis and may conclude that the performance of the two algorithms is not significantly different. While it is generally not recommended to apply statistical tests multiple times without correction for multiple hypothesis testing, let us take a look at an example where the decision tree algorithm is limited to producing a very simple decision boundary that would result in a relatively bad performance: clf2 = DecisionTreeClassifier(random_state=1, max_depth=1) score2 = clf2.fit(X_train, y_train).score(X_test, y_test) print('Decision tree accuracy: %.2f%%' % (score2*100)) t, p = paired_ttest_resampled(estimator1=clf1, estimator2=clf2, X=X, y=y, random_seed=1) print('t statistic: %.3f' % t) print('p value: %.3f' % p) Decision tree accuracy: 63.16% t statistic: 39.214 p value: 0.000 Assuming that we also conducted this test with a significance level of \\alpha=0.05 , we can reject the null hypothesis that both models perform equally well on this dataset, since the p-value ( p < 0.001 ) is smaller than \\alpha .","title":"Example 1 - Resampled paired t test"},{"location":"user_guide/evaluate/paired_ttest_resampled/#api","text":"paired_ttest_resampled(estimator1, estimator2, X, y, num_rounds=30, test_size=0.3, scoring=None, random_seed=None) Implements the resampled paired t test procedure to compare the performance of two models (also called k-hold-out paired t test). Parameters estimator1 : scikit-learn classifier or regressor estimator2 : scikit-learn classifier or regressor X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values.
num_rounds : int (default: 30) Number of resampling iterations (i.e., train/test splits) test_size : float or int (default: 0.3) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to use as a test set. If int, represents the absolute number of test examples. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. random_seed : int or None (default: None) Random seed for creating the test/train splits. Returns t : float The t-statistic pvalue : float Two-tailed p-value. If the chosen significance level is larger than the p-value, we reject the null hypothesis and accept that there are significant differences between the two compared models. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_resampled/","title":"API"},{"location":"user_guide/evaluate/permutation_test/","text":"Permutation Test An implementation of a permutation test for hypothesis testing -- testing the null hypothesis that two different groups come from the same distribution. from mlxtend.evaluate import permutation_test Overview Permutation tests (also called exact tests, randomization tests, or re-randomization tests) are nonparametric test procedures to test the null hypothesis that two different groups come from the same distribution. A permutation test can be used for significance or hypothesis testing (including A/B testing) without requiring us to make any assumptions about the sampling distribution (e.g., it doesn't require the samples to be normally distributed). Under the null hypothesis (treatment = control), any permutations are equally likely. (Note that there are (n+m)! permutations, where n is the number of records in the treatment sample, and m is the number of records in the control sample). For a two-sided test, we define the alternative hypothesis that the two samples are different (e.g., treatment != control). Compute the difference (here: mean) of sample x and sample y Combine all measurements into a single dataset Draw a permuted dataset from all possible permutations of the dataset from step 2 Divide the permuted dataset into two datasets x' and y' of size n and m , respectively Compute the difference (here: mean) of sample x' and sample y' and record this difference Repeat steps 3-5 until all permutations are evaluated Return the p-value as the number of times the recorded differences were more extreme than the original difference from step 1, divided by the total number of permutations Here, the p-value is defined as the probability, given the null hypothesis (no difference between the samples) is true, that we obtain results that are at least as extreme as the results we observed (i.e., the sample difference from step 1). More formally, we can express the computation of the p-value as follows ([2]): p(t > t_0) = \\frac{1}{(n+m)!} \\sum^{(n+m)!}_{j=1} I(t_j > t_0), where t_0 is the observed value of the test statistic (step 1
in the list above), and t is the test statistic computed from the resamples (step 5), t(x'_1, x'_2, ..., x'_n, y'_1, y'_2, ..., y'_m) = |\\bar{x'} - \\bar{y'}| , and I is the indicator function. Given a significance level that we specify prior to carrying out the permutation test (e.g., alpha=0.05), we fail to reject the null hypothesis if the p-value is greater than alpha. Note that if the number of permutations is large, evaluating all permutations may not be computationally feasible. Thus, a common approximation is to perform k rounds of permutations (where k is typically a value between 1000 and 2000). References [1] Efron, Bradley and Tibshirani, R. J., An introduction to the bootstrap, Chapman & Hall/CRC Monographs on Statistics & Applied Probability, 1994. [2] Unpingco, Jos\u00e9. Python for probability, statistics, and machine learning. Springer, 2016. [3] Pitman, E. J. G., Significance tests which may be applied to samples from any population, Royal Statistical Society Supplement, 1937, 4: 119-30 and 225-32 Example 1 -- Two-sided permutation test Perform a two-sided permutation test to test the null hypothesis that two groups, \"treatment\" and \"control\", come from the same distribution. We specify alpha=0.01 as our significance level. treatment = [ 28.44, 29.32, 31.22, 29.58, 30.34, 28.76, 29.21, 30.4 , 31.12, 31.78, 27.58, 31.57, 30.73, 30.43, 30.31, 30.32, 29.18, 29.52, 29.22, 30.56] control = [ 33.51, 30.63, 32.38, 32.52, 29.41, 30.93, 49.78, 28.96, 35.77, 31.42, 30.76, 30.6 , 23.64, 30.54, 47.78, 31.98, 34.52, 32.42, 31.32, 40.72] Since evaluating all possible permutations may take a while, we will use the approximation method (see the introduction for details): from mlxtend.evaluate import permutation_test p_value = permutation_test(treatment, control, method='approximate', num_rounds=10000, seed=0) print(p_value) 0.0066 Since p-value < alpha, we can reject the null hypothesis that the two samples come from the same distribution. Example 2 -- Calculating the p-value for correlation analysis (Pearson's R) Note: this is a one-sided hypothesis test, as we conduct the permutation test by asking \"how many times do we obtain a correlation coefficient that is greater than the observed value?\" import numpy as np from mlxtend.evaluate import permutation_test x = np.array([1, 2, 3, 4, 5, 6]) y = np.array([2, 4, 1, 5, 6, 7]) print('Observed pearson R: %.2f' % np.corrcoef(x, y)[1][0]) p_value = permutation_test(x, y, method='exact', func=lambda x, y: np.corrcoef(x, y)[1][0], seed=0) print('P value: %.2f' % p_value) Observed pearson R: 0.81 P value: 0.09 API permutation_test(x, y, func='x_mean != y_mean', method='exact', num_rounds=1000, seed=None) Nonparametric permutation test Parameters x : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the first sample (e.g., the treatment group). y : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the second sample (e.g., the control group). func : custom function or str (default: 'x_mean != y_mean') Function to compute the statistic for the permutation test. - If 'x_mean != y_mean', uses func=lambda x, y: np.abs(np.mean(x) - np.mean(y)) for a two-sided test. - If 'x_mean > y_mean', uses func=lambda x, y: np.mean(x) - np.mean(y) for a one-sided test. - If 'x_mean < y_mean', uses func=lambda x, y: np.mean(y) - np.mean(x) for a one-sided test. method : 'approximate' or 'exact' (default: 'exact') If 'exact' (default), all possible permutations are considered.
If 'approximate', the number of drawn samples is given by num_rounds . Note that 'exact' is typically not feasible unless the dataset size is relatively small. num_rounds : int (default: 1000) The number of permutation samples if method='approximate' . seed : int or None (default: None) The random seed for generating permutation samples if method='approximate' . Returns p-value under the null hypothesis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/permutation_test/","title":"Permutation Test"},{"location":"user_guide/evaluate/permutation_test/#permutation-test","text":"An implementation of a permutation test for hypothesis testing -- testing the null hypothesis that two different groups come from the same distribution. from mlxtend.evaluate import permutation_test","title":"Permutation Test"},{"location":"user_guide/evaluate/permutation_test/#overview","text":"Permutation tests (also called exact tests, randomization tests, or re-randomization tests) are nonparametric test procedures to test the null hypothesis that two different groups come from the same distribution. A permutation test can be used for significance or hypothesis testing (including A/B testing) without requiring us to make any assumptions about the sampling distribution (e.g., it doesn't require the samples to be normally distributed). Under the null hypothesis (treatment = control), any permutations are equally likely. (Note that there are (n+m)! permutations, where n is the number of records in the treatment sample, and m is the number of records in the control sample). For a two-sided test, we define the alternative hypothesis that the two samples are different (e.g., treatment != control). Compute the difference (here: mean) of sample x and sample y Combine all measurements into a single dataset Draw a permuted dataset from all possible permutations of the dataset from step 2 Divide the permuted dataset into two datasets x' and y' of size n and m , respectively Compute the difference (here: mean) of sample x' and sample y' and record this difference Repeat steps 3-5 until all permutations are evaluated Return the p-value as the number of times the recorded differences were more extreme than the original difference from step 1, divided by the total number of permutations Here, the p-value is defined as the probability, given the null hypothesis (no difference between the samples) is true, that we obtain results that are at least as extreme as the results we observed (i.e., the sample difference from step 1). More formally, we can express the computation of the p-value as follows ([2]): p(t > t_0) = \\frac{1}{(n+m)!} \\sum^{(n+m)!}_{j=1} I(t_j > t_0), where t_0 is the observed value of the test statistic (step 1 in the list above), and t is the test statistic computed from the resamples (step 5), t(x'_1, x'_2, ..., x'_n, y'_1, y'_2, ..., y'_m) = |\\bar{x'} - \\bar{y'}| , and I is the indicator function. Given a significance level that we specify prior to carrying out the permutation test (e.g., alpha=0.05), we fail to reject the null hypothesis if the p-value is greater than alpha. Note that if the number of permutations is large, evaluating all permutations may not be computationally feasible. Thus, a common approximation is to perform k rounds of permutations (where k is typically a value between 1000 and 2000).","title":"Overview"},{"location":"user_guide/evaluate/permutation_test/#references","text":"[1] Efron, Bradley and Tibshirani, R.
J., An introduction to the bootstrap, Chapman & Hall/CRC Monographs on Statistics & Applied Probability, 1994. [2] Unpingco, Jos\u00e9. Python for probability, statistics, and machine learning. Springer, 2016. [3] Pitman, E. J. G., Significance tests which may be applied to samples from any population, Royal Statistical Society Supplement, 1937, 4: 119-30 and 225-32","title":"References"},{"location":"user_guide/evaluate/permutation_test/#example-1-two-sided-permutation-test","text":"Perform a two-sided permutation test to test the null hypothesis that two groups, \"treatment\" and \"control\", come from the same distribution. We specify alpha=0.01 as our significance level. treatment = [ 28.44, 29.32, 31.22, 29.58, 30.34, 28.76, 29.21, 30.4 , 31.12, 31.78, 27.58, 31.57, 30.73, 30.43, 30.31, 30.32, 29.18, 29.52, 29.22, 30.56] control = [ 33.51, 30.63, 32.38, 32.52, 29.41, 30.93, 49.78, 28.96, 35.77, 31.42, 30.76, 30.6 , 23.64, 30.54, 47.78, 31.98, 34.52, 32.42, 31.32, 40.72] Since evaluating all possible permutations may take a while, we will use the approximation method (see the introduction for details): from mlxtend.evaluate import permutation_test p_value = permutation_test(treatment, control, method='approximate', num_rounds=10000, seed=0) print(p_value) 0.0066 Since p-value < alpha, we can reject the null hypothesis that the two samples come from the same distribution.","title":"Example 1 -- Two-sided permutation test"},{"location":"user_guide/evaluate/permutation_test/#example-2-calculating-the-p-value-for-correlation-analysis-pearsons-r","text":"Note: this is a one-sided hypothesis test, as we conduct the permutation test by asking \"how many times do we obtain a correlation coefficient that is greater than the observed value?\" import numpy as np from mlxtend.evaluate import permutation_test x = np.array([1, 2, 3, 4, 5, 6]) y = np.array([2, 4, 1, 5, 6, 7]) print('Observed pearson R: %.2f' % np.corrcoef(x, y)[1][0]) p_value = permutation_test(x, y, method='exact', func=lambda x, y: np.corrcoef(x, y)[1][0], seed=0) print('P value: %.2f' % p_value) Observed pearson R: 0.81 P value: 0.09","title":"Example 2 -- Calculating the p-value for correlation analysis (Pearson's R)"},{"location":"user_guide/evaluate/permutation_test/#api","text":"permutation_test(x, y, func='x_mean != y_mean', method='exact', num_rounds=1000, seed=None) Nonparametric permutation test Parameters x : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the first sample (e.g., the treatment group). y : list or numpy array with shape (n_datapoints,) A list or 1D numpy array of the second sample (e.g., the control group). func : custom function or str (default: 'x_mean != y_mean') Function to compute the statistic for the permutation test. - If 'x_mean != y_mean', uses func=lambda x, y: np.abs(np.mean(x) - np.mean(y)) for a two-sided test. - If 'x_mean > y_mean', uses func=lambda x, y: np.mean(x) - np.mean(y) for a one-sided test. - If 'x_mean < y_mean', uses func=lambda x, y: np.mean(y) - np.mean(x) for a one-sided test. method : 'approximate' or 'exact' (default: 'exact') If 'exact' (default), all possible permutations are considered. If 'approximate', the number of drawn samples is given by num_rounds . Note that 'exact' is typically not feasible unless the dataset size is relatively small. num_rounds : int (default: 1000) The number of permutation samples if method='approximate' . seed : int or None (default: None) The random seed for generating permutation samples if method='approximate' .
Returns p-value under the null hypothesis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/permutation_test/","title":"API"},{"location":"user_guide/evaluate/proportion_difference/","text":"Proportion Difference Test Test of the difference of proportions for classifier performance comparison. from mlxtend.evaluate import proportion_difference Overview There are several different statistical hypothesis testing frameworks that are being used in practice to compare the performance of classification models, including common methods such as the difference of two proportions (here, the proportions are the estimated generalization accuracies from a test set), for which we can construct 95% confidence intervals based on the concept of the Normal Approximation to the Binomial that was covered in Part I. Performing a z-score test for two population proportions is inarguably the most straightforward way to compare two models (but certainly not the best!): In a nutshell, if the 95% confidence intervals of the accuracies of two models do not overlap, we can reject the null hypothesis that the performance of both classifiers is equal at a confidence level of \\alpha=0.05 (or 5% probability). Violations of assumptions aside (for instance that the test set samples are not independent), as Thomas Dietterich noted based on empirical results in a simulated study [1], this test tends to have a high false positive rate (here: incorrectly detecting a difference when there is none), which is among the reasons why it is not recommended in practice. Nonetheless, for the sake of completeness, and since it is a commonly used method in practice, the general procedure is outlined below (and also generally applies to the different hypothesis tests presented later): formulate the hypothesis to be tested (for instance, the null hypothesis stating that the proportions are the same; consequently, the alternative hypothesis that the proportions are different, if we use a two-tailed test); decide upon a significance threshold (for instance, if the probability of observing a difference more extreme than the one observed is more than 5%, then we plan to reject the null hypothesis); analyze the data, compute the test statistic (here: z-score), and compare its associated p-value (probability) to the previously determined significance threshold; based on the p-value and significance threshold, either accept or reject the null hypothesis at the given confidence level and interpret the results. The z-score is computed as the observed difference divided by the square root of their combined variances, z = \\frac{ACC_1 - ACC_2}{\\sqrt{\\sigma_{1}^2 + \\sigma_{2}^2}}, where ACC_1 is the accuracy of one model and ACC_2 is the accuracy of a second model estimated from the test set. Recall that we computed the variance of the estimated accuracy as \\sigma^2 = \\frac{ACC(1-ACC)}{n} in Part I and then computed the confidence interval (Normal Approximation Interval) as ACC \\pm z \\times \\sigma, where z=1.96 for a 95% confidence interval. Comparing the confidence intervals of two accuracy estimates and checking whether they overlap is then analogous to computing the z value for the difference in proportions and comparing the probability (p-value) to the chosen significance threshold.
So, to compute the z-score directly for the difference of two proportions, ACC_1 and ACC_2 , we pool these proportions (assuming that ACC_1 and ACC_2 are the performances of two models estimated on two independent test sets of size n_1 and n_2 , respectively), ACC_{1, 2} = \\frac{ACC_1 \\times n_1 + ACC_2 \\times n_2}{n_1 + n_2}, and compute the standard deviation as \\sigma_{1,2} = \\sqrt{\\frac{ACC_{1, 2} (1 - ACC_{1, 2})}{n_1 + n_2}}, such that we can compute the z-score, z = \\frac{ACC_1 - ACC_2}{\\sigma_{1,2}}. Since we use the same test set (violating the independence assumption), we have n_1 = n_2 = n , so that we can simplify the z-score computation to z = \\frac{ACC_1 - ACC_2}{\\sqrt{2\\sigma^2}} = \\frac{ACC_1 - ACC_2}{\\sqrt{2 \\cdot ACC_{1,2}(1-ACC_{1,2})/n}}, where ACC_{1, 2} is simply (ACC_1 + ACC_2)/2 . In the second step, based on the computed z value (this assumes that the test errors are independent, which is usually violated in practice as we use the same test set), we can reject the null hypothesis that a pair of models has equal performance (here, measured in \"classification accuracy\") at an \\alpha=0.05 level if |z| is greater than 1.96. Or, if we want to put in the extra work, we can compute the area under the standard normal cumulative distribution at the z-score threshold. If we find this p-value is smaller than a significance level we set prior to conducting the test, then we can reject the null hypothesis at that given significance level. The problem with this test, though, is that we use the same test set to compute the accuracy of the two classifiers; thus, it might be better to use a paired test such as a paired sample t-test, but a more robust alternative is the McNemar test. References [1] Dietterich, Thomas G. \"Approximate statistical tests for comparing supervised classification learning algorithms.\" Neural computation 10, no. 7 (1998): 1895-1923.
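As a quick sanity check of the formulas above, the z-score and its one-sided p-value can be computed in a few lines. The following is a minimal sketch using the variance formula \\sigma^2 = ACC(1-ACC)/n from the beginning of this overview; the function name is illustrative, not mlxtend's implementation:

```python
# Minimal sketch of the two-proportion z test described above, using
# sigma^2 = ACC * (1 - ACC) / n for each model; names are illustrative.
import numpy as np
from scipy import stats

def proportion_difference_sketch(acc_1, acc_2, n):
    var_1 = acc_1 * (1.0 - acc_1) / n
    var_2 = acc_2 * (1.0 - acc_2) / n
    z = (acc_1 - acc_2) / np.sqrt(var_1 + var_2)
    p_value = stats.norm.sf(np.abs(z))  # tail area of the standard normal
    return z, p_value

# For the accuracies in Example 1 below (0.84 vs. 0.92 on a test set of
# n=100 samples), this yields z = -1.754 and p = 0.040, matching the
# output of mlxtend's proportion_difference.
print(proportion_difference_sketch(0.84, 0.92, 100))
```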
Example 1 - Difference of Proportions As an example for applying this test, consider the following 2 model predictions: import numpy as np ## Dataset: # ground truth labels of the test dataset: y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # predictions by 2 classifiers (`y_model_1` and `y_model_2`): y_model_1 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_2 = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) The test accuracies are then as follows: acc_1 = np.sum(y_true == y_model_1) / y_true.shape[0] acc_2 = np.sum(y_true == y_model_2) / y_true.shape[0] print('Accuracy Model 1:', acc_1) print('Accuracy Model 2:', acc_2) Accuracy Model 1: 0.84 Accuracy Model 2: 0.92 Now, setting a significance threshold of \\alpha=0.05 and conducting the test from mlxtend.evaluate import proportion_difference z, p_value = proportion_difference(acc_1, acc_2, n_1=y_true.shape[0]) print('z: %.3f' % z) print('p-value: %.3f' % p_value) z: -1.754 p-value: 0.040 we find that there is a statistically significant difference between the model performances. It should be highlighted, though, that due to the typical independence violation of using the same test set, as well as this test's high false positive rate, it is not recommended for use in practice. API proportion_difference(proportion_1, proportion_2, n_1, n_2=None) Computes the test statistic and p-value for a difference of proportions test. Parameters proportion_1 : float The first proportion proportion_2 : float The second proportion n_1 : int The sample size of the first test sample n_2 : int or None (default=None) The sample size of the second test sample. If None , n_1 = n_2 . Returns z, p : float or None, float Returns the z-score and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/proportion_difference/","title":"Proportion Difference Test"},{"location":"user_guide/evaluate/proportion_difference/#proportion-difference-test","text":"Test of the difference of proportions for classifier performance comparison. from mlxtend.evaluate import proportion_difference","title":"Proportion Difference Test"},{"location":"user_guide/evaluate/proportion_difference/#overview","text":"There are several different statistical hypothesis testing frameworks that are being used in practice to compare the performance of classification models, including common methods such as the difference of two proportions (here, the proportions are the estimated generalization accuracies from a test set), for which we can construct 95% confidence intervals based on the concept of the Normal Approximation to the Binomial that was covered in Part I.
Performing a z-score test for two population proportions is inarguably the most straightforward way to compare two models (but certainly not the best!): In a nutshell, if the 95% confidence intervals of the accuracies of two models do not overlap, we can reject the null hypothesis that the performance of both classifiers is equal at a confidence level of \\alpha=0.05 (or 5% probability). Violations of assumptions aside (for instance that the test set samples are not independent), as Thomas Dietterich noted based on empirical results in a simulated study [1], this test tends to have a high false positive rate (here: incorrectly detecting a difference when there is none), which is among the reasons why it is not recommended in practice. Nonetheless, for the sake of completeness, and since it is a commonly used method in practice, the general procedure is outlined below (and also generally applies to the different hypothesis tests presented later): formulate the hypothesis to be tested (for instance, the null hypothesis stating that the proportions are the same; consequently, the alternative hypothesis that the proportions are different, if we use a two-tailed test); decide upon a significance threshold (for instance, if the probability of observing a difference more extreme than the one observed is more than 5%, then we plan to reject the null hypothesis); analyze the data, compute the test statistic (here: z-score), and compare its associated p-value (probability) to the previously determined significance threshold; based on the p-value and significance threshold, either accept or reject the null hypothesis at the given confidence level and interpret the results. The z-score is computed as the observed difference divided by the square root of their combined variances, z = \\frac{ACC_1 - ACC_2}{\\sqrt{\\sigma_{1}^2 + \\sigma_{2}^2}}, where ACC_1 is the accuracy of one model and ACC_2 is the accuracy of a second model estimated from the test set. Recall that we computed the variance of the estimated accuracy as \\sigma^2 = \\frac{ACC(1-ACC)}{n} in Part I and then computed the confidence interval (Normal Approximation Interval) as ACC \\pm z \\times \\sigma, where z=1.96 for a 95% confidence interval. Comparing the confidence intervals of two accuracy estimates and checking whether they overlap is then analogous to computing the z value for the difference in proportions and comparing the probability (p-value) to the chosen significance threshold. So, to compute the z-score directly for the difference of two proportions, ACC_1 and ACC_2 , we pool these proportions (assuming that ACC_1 and ACC_2 are the performances of two models estimated on two independent test sets of size n_1 and n_2 , respectively), ACC_{1, 2} = \\frac{ACC_1 \\times n_1 + ACC_2 \\times n_2}{n_1 + n_2}, and compute the standard deviation as \\sigma_{1,2} = \\sqrt{\\frac{ACC_{1, 2} (1 - ACC_{1, 2})}{n_1 + n_2}}, such that we can compute the z-score, z = \\frac{ACC_1 - ACC_2}{\\sigma_{1,2}}. Since we use the same test set (violating the independence assumption), we have n_1 = n_2 = n , so that we can simplify the z-score computation to z = \\frac{ACC_1 - ACC_2}{\\sqrt{2\\sigma^2}} = \\frac{ACC_1 - ACC_2}{\\sqrt{2 \\cdot ACC_{1,2}(1-ACC_{1,2})/n}}, where ACC_{1, 2} is simply (ACC_1 + ACC_2)/2 .
In the second step, based on the computed z value (this assumes that the test errors are independent, which is usually violated in practice as we use the same test set), we can reject the null hypothesis that a pair of models has equal performance (here, measured in \"classification accuracy\") at an \\alpha=0.05 level if |z| is greater than 1.96. Or, if we want to put in the extra work, we can compute the area under the standard normal cumulative distribution at the z-score threshold. If we find this p-value is smaller than a significance level we set prior to conducting the test, then we can reject the null hypothesis at that given significance level. The problem with this test, though, is that we use the same test set to compute the accuracy of the two classifiers; thus, it might be better to use a paired test such as a paired sample t-test, but a more robust alternative is the McNemar test.","title":"Overview"},{"location":"user_guide/evaluate/proportion_difference/#references","text":"[1] Dietterich, Thomas G. \"Approximate statistical tests for comparing supervised classification learning algorithms.\" Neural computation 10, no. 7 (1998): 1895-1923.","title":"References"},{"location":"user_guide/evaluate/proportion_difference/#example-1-difference-of-proportions","text":"As an example for applying this test, consider the following 2 model predictions: import numpy as np ## Dataset: # ground truth labels of the test dataset: y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # predictions by 2 classifiers (`y_model_1` and `y_model_2`): y_model_1 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) y_model_2 = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) The test accuracies are then as follows: acc_1 = np.sum(y_true == y_model_1) / y_true.shape[0] acc_2 = np.sum(y_true == y_model_2) / y_true.shape[0] print('Accuracy Model 1:', acc_1) print('Accuracy Model 2:', acc_2) Accuracy Model 1: 0.84 Accuracy Model 2: 0.92 Now, setting a significance threshold of \\alpha=0.05 and conducting the test from mlxtend.evaluate import proportion_difference z, p_value = proportion_difference(acc_1, acc_2, n_1=y_true.shape[0]) print('z: %.3f' % z) print('p-value: %.3f' % p_value) z: -1.754 p-value: 0.040 we find that there is a statistically significant difference between the model performances.
It should be highlighted, though, that due to the typical independence violation of using the same test set, as well as this test's high false positive rate, it is not recommended for use in practice.","title":"Example 1 - Difference of Proportions"},{"location":"user_guide/evaluate/proportion_difference/#api","text":"proportion_difference(proportion_1, proportion_2, n_1, n_2=None) Computes the test statistic and p-value for a difference of proportions test. Parameters proportion_1 : float The first proportion proportion_2 : float The second proportion n_1 : int The sample size of the first test sample n_2 : int or None (default=None) The sample size of the second test sample. If None , n_1 = n_2 . Returns z, p : float or None, float Returns the z-score and the p-value Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/proportion_difference/","title":"API"},{"location":"user_guide/evaluate/scoring/","text":"Scoring A function for computing various different performance metrics. from mlxtend.evaluate import scoring Overview Confusion Matrix The confusion matrix (or error matrix ) is one way to summarize the performance of a classifier for binary classification tasks. This square matrix consists of columns and rows that list the number of instances as absolute or relative \"actual class\" vs. \"predicted class\" ratios. Let P be the label of class 1 and N be the label of a second class or the label of all classes that are not class 1 in a multi-class setting. Error and Accuracy Both the prediction error (ERR) and accuracy (ACC) provide general information about how many samples are misclassified. The error can be understood as the sum of all false predictions divided by the total number of predictions, and the accuracy is calculated as the sum of correct predictions divided by the total number of predictions, respectively. ERR = \\frac{FP + FN}{FP + FN + TP + TN} = 1-ACC ACC = \\frac{TP + TN}{FP + FN + TP + TN} = 1-ERR True and False Positive Rates The True Positive Rate (TPR) and False Positive Rate (FPR) are performance metrics that are especially useful for imbalanced class problems. In spam classification , for example, we are of course primarily interested in the detection and filtering out of spam . However, it is also important to decrease the number of messages that were incorrectly classified as spam ( False Positives ): A situation where a person misses an important message is considered as \"worse\" than a situation where a person ends up with a few spam messages in his e-mail inbox. In contrast to the FPR , the True Positive Rate provides useful information about the fraction of positive (or relevant ) samples that were correctly identified out of the total pool of Positives . FPR = \\frac{FP}{N} = \\frac{FP}{FP + TN} TPR = \\frac{TP}{P} = \\frac{TP}{FN + TP} Precision, Recall, and the F1-Score Precision (PRE) and Recall (REC) are metrics that are more commonly used in Information Technology and related to the False and True Positive Rates . In fact, Recall is synonymous with the True Positive Rate and also sometimes called Sensitivity . The F _1 -Score can be understood as a combination of both Precision and Recall .
PRE = \\frac{TP}{TP + FP} REC = TPR = \\frac{TP}{P} = \\frac{TP}{FN + TP} F_1 = 2 \\cdot \\frac{PRE \\cdot REC}{PRE + REC} Sensitivity and Specificity Sensitivity (SEN) is synonymous with Recall and the True Positive Rate whereas Specificity (SPC) is synonymous with the True Negative Rate -- Sensitivity measures the recovery rate of the Positives and, complementarily, the Specificity measures the recovery rate of the Negatives . SEN = TPR = REC = \\frac{TP}{P} = \\frac{TP}{FN + TP} SPC = TNR = \\frac{TN}{N} = \\frac{TN}{FP + TN} Matthews Correlation Coefficient Matthews correlation coefficient (MCC) was first formulated by Brian W. Matthews [3] in 1975 to assess the performance of protein secondary structure predictions. The MCC can be understood as a specific case of a linear correlation coefficient ( Pearson's R ) for a binary classification setting and is considered especially useful in unbalanced class settings. The previous metrics take values in the range between 0 (worst) and 1 (best), whereas the MCC ranges between 1 (perfect correlation between ground truth and predicted outcome) and -1 (inverse or negative correlation) -- a value of 0 denotes a random prediction. MCC = \\frac{ TP \\times TN - FP \\times FN } {\\sqrt{ (TP + FP) ( TP + FN ) ( TN + FP ) ( TN + FN ) } } Average Per-Class Accuracy The \"overall\" accuracy is defined as the number of correct predictions ( true positives TP and true negatives TN) over all samples n in a binary class setting: ACC = \\frac{TP + TN}{n} . In a multi-class setting, we can generalize the computation of the accuracy as the fraction of all true predictions (the diagonal) over all samples n: ACC = \\frac{T}{n} . Considering a multi-class problem with 3 classes (C0, C1, C2), let's assume our model made the following predictions: We compute the accuracy as: ACC = \\frac{3 + 50 + 18}{90} \\approx 0.79 Now, in order to compute the average per-class accuracy , we compute the binary accuracy for each class label separately; i.e., if class 1 is the positive class, class 0 and 2 are both considered the negative class. APC\\;ACC = \\frac{83/90 + 71/90 + 78/90}{3} \\approx 0.86 References [1] S. Raschka. An overview of general performance metrics of binary classifier systems . Computing Research Repository (CoRR), abs/1410.5330, 2014. [2] Cyril Goutte and Eric Gaussier. A probabilistic interpretation of precision, recall and f-score, with implication for evaluation . In Advances in Information Retrieval, pages 345\u2013359. Springer, 2005. [3] Brian W Matthews. Comparison of the predicted and observed secondary structure of T4 phage lysozyme . Biochimica et Biophysica Acta (BBA) - Protein Structure, 405(2):442\u2013451, 1975. Example 1 - Classification Error from mlxtend.evaluate import scoring y_targ = [1, 1, 1, 0, 0, 2, 0, 3] y_pred = [1, 0, 1, 0, 0, 2, 1, 3] res = scoring(y_target=y_targ, y_predicted=y_pred, metric='error') print('Error: %s%%' % (res * 100)) Error: 25.0% API scoring(y_target, y_predicted, metric='error', positive_label=1, unique_labels='auto') Compute a scoring metric for supervised learning. Parameters y_target : array-like, shape=[n_values] True class labels or target values. y_predicted : array-like, shape=[n_values] Predicted class labels or target values.
metric : str (default: 'error') Performance metric: 'accuracy': (TP + TN)/(FP + FN + TP + TN) = 1-ERR 'per-class accuracy': Average per-class accuracy 'per-class error': Average per-class error 'error': (FP + FN)/(FP + FN + TP + TN) = 1-ACC 'false_positive_rate': FP/N = FP/(FP + TN) 'true_positive_rate': TP/P = TP/(FN + TP) 'true_negative_rate': TN/N = TN/(FP + TN) 'precision': TP/(TP + FP) 'recall': equal to 'true_positive_rate' 'sensitivity': equal to 'true_positive_rate' or 'recall' 'specificity': equal to 'true_negative_rate' 'f1': 2 * (PRE * REC)/(PRE + REC) 'matthews_corr_coef': (TP*TN - FP*FN) / sqrt((TP + FP)(TP + FN)(TN + FP)(TN + FN)) Where: [TP: true positives, TN: true negatives, FP: false positives, FN: false negatives] positive_label : int (default: 1) Label of the positive class for binary classification metrics. unique_labels : str or array-like (default: 'auto') If 'auto', deduces the unique class labels from y_target Returns score : float Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/scoring/","title":"Scoring"},{"location":"user_guide/evaluate/scoring/#scoring","text":"A function for computing various different performance metrics. from mlxtend.evaluate import scoring","title":"Scoring"},{"location":"user_guide/evaluate/scoring/#overview","text":"","title":"Overview"},{"location":"user_guide/evaluate/scoring/#confusion-matrix","text":"The confusion matrix (or error matrix ) is one way to summarize the performance of a classifier for binary classification tasks. This square matrix consists of columns and rows that list the number of instances as absolute or relative \"actual class\" vs. \"predicted class\" ratios. Let P be the label of class 1 and N be the label of a second class or the label of all classes that are not class 1 in a multi-class setting.","title":"Confusion Matrix"},{"location":"user_guide/evaluate/scoring/#error-and-accuracy","text":"Both the prediction error (ERR) and accuracy (ACC) provide general information about how many samples are misclassified. The error can be understood as the sum of all false predictions divided by the total number of predictions, and the accuracy is calculated as the sum of correct predictions divided by the total number of predictions, respectively. ERR = \\frac{FP + FN}{FP + FN + TP + TN} = 1-ACC ACC = \\frac{TP + TN}{FP + FN + TP + TN} = 1-ERR","title":"Error and Accuracy"},{"location":"user_guide/evaluate/scoring/#true-and-false-positive-rates","text":"The True Positive Rate (TPR) and False Positive Rate (FPR) are performance metrics that are especially useful for imbalanced class problems. In spam classification , for example, we are of course primarily interested in the detection and filtering out of spam . However, it is also important to decrease the number of messages that were incorrectly classified as spam ( False Positives ): A situation where a person misses an important message is considered as \"worse\" than a situation where a person ends up with a few spam messages in his e-mail inbox. In contrast to the FPR , the True Positive Rate provides useful information about the fraction of positive (or relevant ) samples that were correctly identified out of the total pool of Positives .
FPR = \\frac{FP}{N} = \\frac{FP}{FP + TN} TPR = \\frac{TP}{P} = \\frac{TP}{FN + TP}","title":"True and False Positive Rates"},{"location":"user_guide/evaluate/scoring/#precision-recall-and-the-f1-score","text":"Precision (PRE) and Recall (REC) are metrics that are more commonly used in Information Technology and related to the False and True Positive Rates . In fact, Recall is synonymous with the True Positive Rate and also sometimes called Sensitivity . The F _1 -Score can be understood as a combination of both Precision and Recall . PRE = \\frac{TP}{TP + FP} REC = TPR = \\frac{TP}{P} = \\frac{TP}{FN + TP} F_1 = 2 \\cdot \\frac{PRE \\cdot REC}{PRE + REC}","title":"Precision, Recall, and the F1-Score"},{"location":"user_guide/evaluate/scoring/#sensitivity-and-specificity","text":"Sensitivity (SEN) is synonymous with Recall and the True Positive Rate whereas Specificity (SPC) is synonymous with the True Negative Rate -- Sensitivity measures the recovery rate of the Positives and, complementarily, the Specificity measures the recovery rate of the Negatives . SEN = TPR = REC = \\frac{TP}{P} = \\frac{TP}{FN + TP} SPC = TNR = \\frac{TN}{N} = \\frac{TN}{FP + TN}","title":"Sensitivity and Specificity"},{"location":"user_guide/evaluate/scoring/#matthews-correlation-coefficient","text":"Matthews correlation coefficient (MCC) was first formulated by Brian W. Matthews [3] in 1975 to assess the performance of protein secondary structure predictions. The MCC can be understood as a specific case of a linear correlation coefficient ( Pearson's R ) for a binary classification setting and is considered especially useful in unbalanced class settings. The previous metrics take values in the range between 0 (worst) and 1 (best), whereas the MCC ranges between 1 (perfect correlation between ground truth and predicted outcome) and -1 (inverse or negative correlation) -- a value of 0 denotes a random prediction. MCC = \\frac{ TP \\times TN - FP \\times FN } {\\sqrt{ (TP + FP) ( TP + FN ) ( TN + FP ) ( TN + FN ) } }","title":"Matthews Correlation Coefficient"},{"location":"user_guide/evaluate/scoring/#average-per-class-accuracy","text":"The \"overall\" accuracy is defined as the number of correct predictions ( true positives TP and true negatives TN) over all samples n in a binary class setting: ACC = \\frac{TP + TN}{n} . In a multi-class setting, we can generalize the computation of the accuracy as the fraction of all true predictions (the diagonal) over all samples n: ACC = \\frac{T}{n} . Considering a multi-class problem with 3 classes (C0, C1, C2), let's assume our model made the following predictions: We compute the accuracy as: ACC = \\frac{3 + 50 + 18}{90} \\approx 0.79 Now, in order to compute the average per-class accuracy , we compute the binary accuracy for each class label separately; i.e., if class 1 is the positive class, class 0 and 2 are both considered the negative class. APC\\;ACC = \\frac{83/90 + 71/90 + 78/90}{3} \\approx 0.86","title":"Average Per-Class Accuracy"},{"location":"user_guide/evaluate/scoring/#references","text":"[1] S. Raschka. An overview of general performance metrics of binary classifier systems . Computing Research Repository (CoRR), abs/1410.5330, 2014. [2] Cyril Goutte and Eric Gaussier. A probabilistic interpretation of precision, recall and f-score, with implication for evaluation . In Advances in Information Retrieval, pages 345\u2013359. Springer, 2005. [3] Brian W Matthews. Comparison of the predicted and observed secondary structure of T4 phage lysozyme .
Biochimica et Biophysica Acta (BBA) - Protein Structure, 405(2):442\u2013451, 1975.","title":"References"},{"location":"user_guide/evaluate/scoring/#example-1-classification-error","text":"from mlxtend.evaluate import scoring y_targ = [1, 1, 1, 0, 0, 2, 0, 3] y_pred = [1, 0, 1, 0, 0, 2, 1, 3] res = scoring(y_target=y_targ, y_predicted=y_pred, metric='error') print('Error: %s%%' % (res * 100)) Error: 25.0%","title":"Example 1 - Classification Error"},{"location":"user_guide/evaluate/scoring/#api","text":"scoring(y_target, y_predicted, metric='error', positive_label=1, unique_labels='auto') Compute a scoring metric for supervised learning. Parameters y_target : array-like, shape=[n_values] True class labels or target values. y_predicted : array-like, shape=[n_values] Predicted class labels or target values. metric : str (default: 'error') Performance metric: 'accuracy': (TP + TN)/(FP + FN + TP + TN) = 1-ERR 'per-class accuracy': Average per-class accuracy 'per-class error': Average per-class error 'error': (FP + FN)/(FP + FN + TP + TN) = 1-ACC 'false_positive_rate': FP/N = FP/(FP + TN) 'true_positive_rate': TP/P = TP/(FN + TP) 'true_negative_rate': TN/N = TN/(FP + TN) 'precision': TP/(TP + FP) 'recall': equal to 'true_positive_rate' 'sensitivity': equal to 'true_positive_rate' or 'recall' 'specificity': equal to 'true_negative_rate' 'f1': 2 * (PRE * REC)/(PRE + REC) 'matthews_corr_coef': (TP*TN - FP*FN) / sqrt((TP + FP)(TP + FN)(TN + FP)(TN + FN)) Where: [TP: true positives, TN: true negatives, FP: false positives, FN: false negatives] positive_label : int (default: 1) Label of the positive class for binary classification metrics. unique_labels : str or array-like (default: 'auto') If 'auto', deduces the unique class labels from y_target Returns score : float Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/evaluate/scoring/","title":"API"},{"location":"user_guide/feature_extraction/LinearDiscriminantAnalysis/","text":"Linear Discriminant Analysis Implementation of Linear Discriminant Analysis for dimensionality reduction from mlxtend.feature_extraction import LinearDiscriminantAnalysis Overview Linear Discriminant Analysis (LDA) is most commonly used as a dimensionality reduction technique in the pre-processing step for pattern-classification and machine learning applications. The goal is to project a dataset onto a lower-dimensional space with good class-separability in order to avoid overfitting (\"curse of dimensionality\") and also to reduce computational costs. Ronald A. Fisher formulated the Linear Discriminant in 1936 ( The Use of Multiple Measurements in Taxonomic Problems ), and it also has some practical uses as a classifier. The original Linear Discriminant was described for a 2-class problem, and it was then later generalized as \"multi-class Linear Discriminant Analysis\" or \"Multiple Discriminant Analysis\" by C. R. Rao in 1948 ( The utilization of multiple measurements in problems of biological classification ). The general LDA approach is very similar to a Principal Component Analysis, but in addition to finding the component axes that maximize the variance of our data (PCA), we are additionally interested in the axes that maximize the separation between multiple classes (LDA). So, in a nutshell, often the goal of an LDA is to project a feature space (a dataset of n-dimensional samples) onto a smaller subspace k (where k \\leq n-1 ) while maintaining the class-discriminatory information.
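Before walking through the full procedure below, the projection itself can be illustrated at the level of array shapes. The following is a small illustrative numpy sketch (the random W merely stands in for the matrix of top-k eigenvectors that LDA would actually compute):

```python
# Illustrative shape check for the LDA projection Y = X W described below.
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(150, 4)   # n=150 samples with d=4 features
W = rng.randn(4, 2)     # hypothetical d x k projection matrix (k=2)
Y = X.dot(W)            # projected samples in the k-dimensional subspace
print(Y.shape)          # (150, 2)
```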
{"location":"user_guide/feature_extraction/LinearDiscriminantAnalysis/","text":"Linear Discriminant Analysis Implementation of Linear Discriminant Analysis for dimensionality reduction from mlxtend.feature_extraction import LinearDiscriminantAnalysis Overview Linear Discriminant Analysis (LDA) is most commonly used as a dimensionality reduction technique in the pre-processing step for pattern-classification and machine learning applications. The goal is to project a dataset onto a lower-dimensional space with good class-separability in order to avoid overfitting (\"curse of dimensionality\") and also to reduce computational costs. Ronald A. Fisher formulated the Linear Discriminant in 1936 ( The Use of Multiple Measurements in Taxonomic Problems ), and it also has some practical uses as a classifier. The original linear discriminant was described for a 2-class problem, and it was later generalized as \"multi-class Linear Discriminant Analysis\" or \"Multiple Discriminant Analysis\" by C. R. Rao in 1948 ( The utilization of multiple measurements in problems of biological classification ). The general LDA approach is very similar to a Principal Component Analysis, but in addition to finding the component axes that maximize the variance of our data (PCA), we are additionally interested in the axes that maximize the separation between multiple classes (LDA). So, in a nutshell, the goal of an LDA is often to project a feature space (a dataset of n-dimensional samples) onto a smaller subspace k (where k \\leq n-1 ) while maintaining the class-discriminatory information. In general, dimensionality reduction not only helps reduce computational costs for a given classification task, but it can also help avoid overfitting by minimizing the error in parameter estimation (\"curse of dimensionality\"). Summarizing the LDA approach in 5 steps Listed below are the 5 general steps for performing a linear discriminant analysis. Compute the d -dimensional mean vectors for the different classes from the dataset. Compute the scatter matrices (between-class and within-class scatter matrix). Compute the eigenvectors ( \\mathbf{e_1}, \\; \\mathbf{e_2}, \\; ..., \\; \\mathbf{e_d} ) and corresponding eigenvalues ( \\mathbf{\\lambda_1}, \\; \\mathbf{\\lambda_2}, \\; ..., \\; \\mathbf{\\lambda_d} ) for the scatter matrices. Sort the eigenvectors by decreasing eigenvalues and choose the k eigenvectors with the largest eigenvalues to form a d \\times k -dimensional matrix \\mathbf{W} (where every column represents an eigenvector). Use this d \\times k eigenvector matrix to transform the samples onto the new subspace. This can be summarized by the mathematical equation: \\mathbf{Y} = \\mathbf{X} \\times \\mathbf{W} (where \\mathbf{X} is an n \\times d -dimensional matrix representing the n samples, and \\mathbf{Y} are the transformed n \\times k -dimensional samples in the new subspace). References Fisher, Ronald A. \" The use of multiple measurements in taxonomic problems. \" Annals of Eugenics 7.2 (1936): 179-188. Rao, C. Radhakrishna. \" The utilization of multiple measurements in problems of biological classification. \" Journal of the Royal Statistical Society. Series B (Methodological) 10.2 (1948): 159-203. Example 1 - LDA on Iris from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import LinearDiscriminantAnalysis X, y = iris_data() X = standardize(X) lda = LinearDiscriminantAnalysis(n_discriminants=2) lda.fit(X, y) X_lda = lda.transform(X) import matplotlib.pyplot as plt with plt.style.context('seaborn-whitegrid'): plt.figure(figsize=(6, 4)) for lab, col in zip((0, 1, 2), ('blue', 'red', 'green')): plt.scatter(X_lda[y == lab, 0], X_lda[y == lab, 1], label=lab, c=col) plt.xlabel('Linear Discriminant 1') plt.ylabel('Linear Discriminant 2') plt.legend(loc='lower right') plt.tight_layout() plt.show() Example 2 - Plotting the Between-Class Variance Explained Ratio from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import LinearDiscriminantAnalysis import matplotlib.pyplot as plt X, y = iris_data() X = standardize(X) lda = LinearDiscriminantAnalysis(n_discriminants=None) lda.fit(X, y) X_lda = lda.transform(X) import numpy as np tot = sum(lda.e_vals_) var_exp = [(i / tot)*100 for i in sorted(lda.e_vals_, reverse=True)] cum_var_exp = np.cumsum(var_exp) with plt.style.context('seaborn-whitegrid'): fig, ax = plt.subplots(figsize=(6, 4)) plt.bar(range(4), var_exp, alpha=0.5, align='center', label='individual explained variance') plt.step(range(4), cum_var_exp, where='mid', label='cumulative explained variance') plt.ylabel('Explained variance ratio') plt.xlabel('Linear discriminants') plt.xticks(range(4)) ax.set_xticklabels(np.arange(1, X.shape[1] + 1)) plt.legend(loc='best') plt.tight_layout()
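Since the discriminants are simply a new feature representation, a natural follow-up is to feed them into a downstream classifier. The sketch below is an illustration only; the choice of KNeighborsClassifier is an assumption, not part of the original example:

```python
from mlxtend.data import iris_data
from mlxtend.preprocessing import standardize
from mlxtend.feature_extraction import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier  # assumed for illustration

X, y = iris_data()
X = standardize(X)

# Project onto the first two linear discriminants ...
lda = LinearDiscriminantAnalysis(n_discriminants=2)
X_lda = lda.fit(X, y).transform(X)
print(X_lda.shape)  # expected: (150, 2)

# ... and use the 2D representation as input to a simple classifier
knn = KNeighborsClassifier().fit(X_lda, y)
print('Training accuracy: %.2f' % knn.score(X_lda, y))
```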
API LinearDiscriminantAnalysis(n_discriminants=None) Linear Discriminant Analysis Class Parameters n_discriminants : int (default: None) The number of discriminants for transformation. Keeps the original dimensions of the dataset if None . Attributes w_ : array-like, shape=[n_features, n_discriminants] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/LinearDiscriminantAnalysis/ Methods fit(X, y, n_classes=None) Fit the LDA model with X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. n_classes : int (default: None) A positive integer to declare the number of class labels if not all class labels are present in a partial training set. Gets the number of class labels automatically if None. Returns self : object transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_discriminants] Projected training vectors.","title":"Linear Discriminant Analysis"},
{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/","text":"Principal Component Analysis Implementation of Principal Component Analysis for dimensionality reduction from mlxtend.feature_extraction import PrincipalComponentAnalysis Overview The sheer size of data in the modern age is not only a challenge for computer hardware but also a main bottleneck for the performance of many machine learning algorithms. The main goal of a PCA is to identify patterns in data; PCA aims to detect the correlation between variables. Attempting to reduce the dimensionality only makes sense if a strong correlation between variables exists. In a nutshell, this is what PCA is all about: finding the directions of maximum variance in high-dimensional data and projecting it onto a lower-dimensional subspace while retaining most of the information. PCA and Dimensionality Reduction Often, the desired goal is to reduce the dimensions of a d -dimensional dataset by projecting it onto a k -dimensional subspace (where k\\;<\\;d ) in order to increase the computational efficiency while retaining most of the information. An important question is \"what is the size of k that represents the data 'well'?\" Later, we will compute eigenvectors (the principal components) of a dataset and collect them in a projection matrix. Each of those eigenvectors is associated with an eigenvalue, which can be interpreted as the \"length\" or \"magnitude\" of the corresponding eigenvector. If some eigenvalues have a significantly larger magnitude than others, then reducing the dataset via PCA onto a lower-dimensional subspace by dropping the \"less informative\" eigenpairs is reasonable. A Summary of the PCA Approach Standardize the data. Obtain the Eigenvectors and Eigenvalues from the covariance matrix or correlation matrix, or perform Singular Value Decomposition. Sort eigenvalues in descending order and choose the k eigenvectors that correspond to the k largest eigenvalues, where k is the number of dimensions of the new feature subspace ( k \\le d ). Construct the projection matrix \\mathbf{W} from the selected k eigenvectors. Transform the original dataset \\mathbf{X} via \\mathbf{W} to obtain a k -dimensional feature subspace \\mathbf{Y} .
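For illustration, the five steps can be condensed into a short from-scratch NumPy sketch (a simplified illustration, not mlxtend's actual implementation):

```python
import numpy as np

def pca_sketch(X, k):
    # 1. standardize the data
    X_std = (X - X.mean(axis=0)) / X.std(axis=0)
    # 2. eigendecomposition of the covariance matrix
    e_vals, e_vecs = np.linalg.eigh(np.cov(X_std.T))
    # 3. sort eigenvalues in descending order, keep the top k eigenvectors
    order = np.argsort(e_vals)[::-1][:k]
    # 4. projection matrix W (each column is an eigenvector)
    W = e_vecs[:, order]
    # 5. transform onto the new subspace: Y = X W
    return X_std @ W
```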
References Pearson, Karl. \"LIII. On lines and planes of closest fit to systems of points in space. \" The London, Edinburgh, and Dublin Philosophical Magazine and Journal of Science 2.11 (1901): 559-572. Example 1 - PCA on Iris from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import PrincipalComponentAnalysis X, y = iris_data() X = standardize(X) pca = PrincipalComponentAnalysis(n_components=2) pca.fit(X) X_pca = pca.transform(X) import matplotlib.pyplot as plt with plt.style.context('seaborn-whitegrid'): plt.figure(figsize=(6, 4)) for lab, col in zip((0, 1, 2), ('blue', 'red', 'green')): plt.scatter(X_pca[y==lab, 0], X_pca[y==lab, 1], label=lab, c=col) plt.xlabel('Principal Component 1') plt.ylabel('Principal Component 2') plt.legend(loc='lower center') plt.tight_layout() plt.show() Example 2 - Plotting the Variance Explained Ratio from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import PrincipalComponentAnalysis import matplotlib.pyplot as plt X, y = iris_data() X = standardize(X) pca = PrincipalComponentAnalysis(n_components=None) pca.fit(X) X_pca = pca.transform(X) import numpy as np tot = sum(pca.e_vals_) var_exp = [(i / tot)*100 for i in sorted(pca.e_vals_, reverse=True)] cum_var_exp = np.cumsum(var_exp) with plt.style.context('seaborn-whitegrid'): fig, ax = plt.subplots(figsize=(6, 4)) plt.bar(range(4), var_exp, alpha=0.5, align='center', label='individual explained variance') plt.step(range(4), cum_var_exp, where='mid', label='cumulative explained variance') plt.ylabel('Explained variance ratio') plt.xlabel('Principal components') plt.xticks(range(4)) ax.set_xticklabels(np.arange(1, X.shape[1] + 1)) plt.legend(loc='best') plt.tight_layout() Example 3 - PCA via SVD While the eigendecomposition of the covariance or correlation matrix may be more intuitive, most PCA implementations perform a Singular Value Decomposition (SVD) to improve the computational efficiency. Another advantage of using SVD is that the results tend to be more numerically stable, since we can decompose the input matrix directly without the additional covariance-matrix step. from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import PrincipalComponentAnalysis X, y = iris_data() X = standardize(X) pca = PrincipalComponentAnalysis(n_components=2, solver='svd') pca.fit(X) X_pca = pca.transform(X) import matplotlib.pyplot as plt with plt.style.context('seaborn-whitegrid'): plt.figure(figsize=(6, 4)) for lab, col in zip((0, 1, 2), ('blue', 'red', 'green')): plt.scatter(X_pca[y==lab, 0], X_pca[y==lab, 1], label=lab, c=col) plt.xlabel('Principal Component 1') plt.ylabel('Principal Component 2') plt.legend(loc='lower center') plt.tight_layout() plt.show() If we compare this PCA projection to the previous plot in Example 1, we notice that they are mirror images of each other. Note that this is not due to an error in either of the two implementations: the reason for this difference is that, depending on the eigensolver, eigenvectors can have either negative or positive signs. For instance, if v is an eigenvector of a matrix \\Sigma , we have \\Sigma v = \\lambda v, where \\lambda is our eigenvalue. Then -v is also an eigenvector with the same eigenvalue, since \\Sigma(-v) = -\\Sigma v = -\\lambda v = \\lambda(-v).
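The sign ambiguity is easy to verify with plain NumPy, independently of mlxtend (the small symmetric matrix below is made up for illustration):

```python
import numpy as np

# A small, made-up symmetric matrix standing in for a covariance matrix
S = np.array([[2.0, 0.5],
              [0.5, 1.0]])
e_vals, e_vecs = np.linalg.eigh(S)
v = e_vecs[:, 0]

print(np.allclose(S @ v, e_vals[0] * v))        # True: v is an eigenvector
print(np.allclose(S @ (-v), e_vals[0] * (-v)))  # True: so is -v, same eigenvalue
```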
Example 4 - Factor Loadings After invoking the fit method, the factor loadings are available via the loadings_ attribute. In simple terms, the loadings are the unstandardized values of the eigenvectors. In other words, we can interpret the loadings as the covariances (or correlations, in case we standardized the input features) between the input features and the principal components (or eigenvectors), which have been scaled to unit length. By having the loadings scaled, they become comparable by magnitude, and we can assess how much variance in a component is attributed to the input features (as the components are just a weighted linear combination of the input features). from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import PrincipalComponentAnalysis import matplotlib.pyplot as plt X, y = iris_data() X = standardize(X) pca = PrincipalComponentAnalysis(n_components=2, solver='eigen') pca.fit(X); xlabels = ['sepal length', 'sepal width', 'petal length', 'petal width'] fig, ax = plt.subplots(1, 2, figsize=(8, 3)) ax[0].bar(range(4), pca.loadings_[:, 0], align='center') ax[1].bar(range(4), pca.loadings_[:, 1], align='center') ax[0].set_ylabel('Factor loading onto PC1') ax[1].set_ylabel('Factor loading onto PC2') ax[0].set_xticks(range(4)) ax[1].set_xticks(range(4)) ax[0].set_xticklabels(xlabels, rotation=45) ax[1].set_xticklabels(xlabels, rotation=45) plt.ylim([-1, 1]) plt.tight_layout() For instance, we may say that most of the variance in the first component is attributed to the petal features (although the magnitude of the sepal length loading on PC1 is not much smaller). In contrast, the remaining variance captured by PC2 is mostly due to the sepal width. Note that we know from Example 2 that PC1 explains most of the variance, and based on the information from the loading plots, we may say that petal features combined with sepal length may explain most of the spread in the data.
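Instead of reading the loadings off bar plots, we can also inspect the loadings_ array numerically. The sketch below is illustrative; the feature-name list and the "dominant feature" heuristic are assumptions for demonstration:

```python
import numpy as np
from mlxtend.data import iris_data
from mlxtend.preprocessing import standardize
from mlxtend.feature_extraction import PrincipalComponentAnalysis

X, y = iris_data()
X = standardize(X)
pca = PrincipalComponentAnalysis(n_components=2, solver='eigen').fit(X)

# Report the feature with the largest absolute loading per component
features = ['sepal length', 'sepal width', 'petal length', 'petal width']
for pc in range(2):
    top = np.argmax(np.abs(pca.loadings_[:, pc]))
    print('PC%d is loaded most heavily by %s' % (pc + 1, features[top]))
```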
API PrincipalComponentAnalysis(n_components=None, solver='eigen') Principal Component Analysis Class Parameters n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . solver : str (default: 'eigen') Method for performing the matrix decomposition. {'eigen', 'svd'} Attributes w_ : array-like, shape=[n_features, n_components] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. loadings_ : array_like, shape=[n_features, n_features] The factor loadings of the original variables onto the principal components. The columns are the principal components, and the rows are the feature loadings. For instance, the first column contains the loadings onto the first principal component. Note that the signs may be flipped depending on whether you use the 'eigen' or 'svd' solver; this does not affect the interpretation of the loadings though. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/PrincipalComponentAnalysis/ Methods fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object transform(X) Apply the linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"Principal Component Analysis"},
\" The London, Edinburgh, and Dublin Philosophical Magazine and Journal of Science 2.11 (1901): 559-572.","title":"References"},{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/#example-1-pca-on-iris","text":"from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import PrincipalComponentAnalysis X, y = iris_data() X = standardize(X) pca = PrincipalComponentAnalysis(n_components=2) pca.fit(X) X_pca = pca.transform(X) import matplotlib.pyplot as plt with plt.style.context('seaborn-whitegrid'): plt.figure(figsize=(6, 4)) for lab, col in zip((0, 1, 2), ('blue', 'red', 'green')): plt.scatter(X_pca[y==lab, 0], X_pca[y==lab, 1], label=lab, c=col) plt.xlabel('Principal Component 1') plt.ylabel('Principal Component 2') plt.legend(loc='lower center') plt.tight_layout() plt.show()","title":"Example 1 - PCA on Iris"},{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/#example-2-plotting-the-variance-explained-ratio","text":"from mlxtend.data import iris_data from mlxtend.preprocessing import standardize X, y = iris_data() X = standardize(X) pca = PrincipalComponentAnalysis(n_components=None) pca.fit(X) X_pca = pca.transform(X) import numpy as np tot = sum(pca.e_vals_) var_exp = [(i / tot)*100 for i in sorted(pca.e_vals_, reverse=True)] cum_var_exp = np.cumsum(var_exp) with plt.style.context('seaborn-whitegrid'): fig, ax = plt.subplots(figsize=(6, 4)) plt.bar(range(4), var_exp, alpha=0.5, align='center', label='individual explained variance') plt.step(range(4), cum_var_exp, where='mid', label='cumulative explained variance') plt.ylabel('Explained variance ratio') plt.xlabel('Principal components') plt.xticks(range(4)) ax.set_xticklabels(np.arange(1, X.shape[1] + 1)) plt.legend(loc='best') plt.tight_layout()","title":"Example 2 - Plotting the Variance Explained Ratio"},{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/#example-3-pca-via-svd","text":"While the eigendecomposition of the covariance or correlation matrix may be more intuitiuve, most PCA implementations perform a Singular Vector Decomposition (SVD) to improve the computational efficiency. Another advantage of using SVD is that the results tend to be more numerically stable, since we can decompose the input matrix directly without the additional covariance-matrix step. from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import PrincipalComponentAnalysis X, y = iris_data() X = standardize(X) pca = PrincipalComponentAnalysis(n_components=2, solver='svd') pca.fit(X) X_pca = pca.transform(X) import matplotlib.pyplot as plt with plt.style.context('seaborn-whitegrid'): plt.figure(figsize=(6, 4)) for lab, col in zip((0, 1, 2), ('blue', 'red', 'green')): plt.scatter(X_pca[y==lab, 0], X_pca[y==lab, 1], label=lab, c=col) plt.xlabel('Principal Component 1') plt.ylabel('Principal Component 2') plt.legend(loc='lower center') plt.tight_layout() plt.show() If we compare this PCA projection to the previous plot in example 1, we notice that they are mirror images of each other. Note that this is not due to an error in any of those two implementations, but the reason for this difference is that, depending on the eigensolver, eigenvectors can have either negative or positive signs. 
For instance, if v is an eigenvector of a matrix \\Sigma , we have \\Sigma v = \\lambda v, where \\lambda is our eigenvalue then -v is also an eigenvector that has the same eigenvalue, since \\Sigma(-v) = -\\Sigma v = -\\lambda v = \\lambda(-v).","title":"Example 3 - PCA via SVD"},{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/#example-4-factor-loadings","text":"After evoking the fit method, the factor loadings are available via the loadings_ attribute. In simple terms, the the loadings are the unstandardized values of the eigenvectors. Or in other words, we can interpret the loadings as the covariances (or correlation in case we standardized the input features) between the input features and the and the principal components (or eigenvectors), which have been scaled to unit length. By having the loadings scaled, they become comparable by magnitude and we can assess how much variance in a component is attributed to the input features (as the components are just a weighted linear combination of the input features). from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import PrincipalComponentAnalysis import matplotlib.pyplot as plt X, y = iris_data() X = standardize(X) pca = PrincipalComponentAnalysis(n_components=2, solver='eigen') pca.fit(X); xlabels = ['sepal length', 'sepal width', 'petal length', 'petal width'] fig, ax = plt.subplots(1, 2, figsize=(8, 3)) ax[0].bar(range(4), pca.loadings_[:, 0], align='center') ax[1].bar(range(4), pca.loadings_[:, 1], align='center') ax[0].set_ylabel('Factor loading onto PC1') ax[1].set_ylabel('Factor loading onto PC2') ax[0].set_xticks(range(4)) ax[1].set_xticks(range(4)) ax[0].set_xticklabels(xlabels, rotation=45) ax[1].set_xticklabels(xlabels, rotation=45) plt.ylim([-1, 1]) plt.tight_layout() For instance, we may say that most of the variance in the first component is attributed to the petal features (although the loading of sepal length on PC1 is also not much less in magnitude). In contrast, the remaining variance captured by PC2 is mostly due to the sepal width. Note that we know from Example 2 that PC1 explains most of the variance, and based on the information from the loading plots, we may say that petal features combined with sepal length may explain most of the spread in the data.","title":"Example 4 - Factor Loadings"},{"location":"user_guide/feature_extraction/PrincipalComponentAnalysis/#api","text":"PrincipalComponentAnalysis(n_components=None, solver='eigen') Principal Component Analysis Class Parameters n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . solver : str (default: 'eigen') Method for performing the matrix decomposition. {'eigen', 'svd'} Attributes w_ : array-like, shape=[n_features, n_components] Projection matrix e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. loadings_ : array_like, shape=[n_features, n_features] The factor loadings of the original variables onto the principal components. The columns are the principal components, and the rows are the features loadings. For instance, the first column contains the loadings onto the first principal component. Note that the signs may be flipped depending on whether you use the 'eigen' or 'svd' solver; this does not affect the interpretation of the loadings though. 
{"location":"user_guide/feature_extraction/RBFKernelPCA/","text":"RBF Kernel Principal Component Analysis Implementation of RBF Kernel Principal Component Analysis for non-linear dimensionality reduction from mlxtend.feature_extraction import RBFKernelPCA Overview Most machine learning algorithms have been developed and statistically validated for linearly separable data. Popular examples are linear classifiers like Support Vector Machines (SVMs) or the (standard) Principal Component Analysis (PCA) for dimensionality reduction. However, most real-world data requires nonlinear methods in order to perform tasks that involve the analysis and discovery of patterns successfully. The focus of this overview is to briefly introduce the idea of kernel methods and to implement a Gaussian radial basis function (RBF) kernel that is used to perform nonlinear dimensionality reduction via RBF kernel principal component analysis (kPCA). Principal Component Analysis The main purpose of principal component analysis (PCA) is the analysis of data to identify patterns that represent the data \u201cwell.\u201d The principal components can be understood as new axes of the dataset that maximize the variance along those axes (the eigenvectors of the covariance matrix). In other words, PCA aims to find the axes with maximum variances along which the data is most spread. For more details, please see the related article on mlxtend.feature_extraction.PrincipalComponentAnalysis . Nonlinear dimensionality reduction The \u201cclassic\u201d PCA approach described above is a linear projection technique that works well if the data is linearly separable. However, in the case of linearly inseparable data, a nonlinear technique is required if the task is to reduce the dimensionality of a dataset. Kernel functions and the kernel trick The basic idea to deal with linearly inseparable data is to project it onto a higher dimensional space where it becomes linearly separable. Let us call this nonlinear mapping function \\phi , so that the mapping of a sample \\mathbf{x} can be written as \\mathbf{x} \\rightarrow \\phi (\\mathbf{x}) . Now, the term \"kernel\" describes a function that calculates the dot product of the images of the samples \\mathbf{x} under \\phi .
\\kappa(\\mathbf{x_i, x_j}) = \\phi (\\mathbf{x_i}) \\phi (\\mathbf{x_j})^T More details about the derivation of this equation are provided in this excellent review article by Quan Wang: Kernel Principal Component Analysis and its Applications in Face Recognition and Active Shape Models .[ 1 ] In other words, the function \\phi maps the original d-dimensional features into a larger, k-dimensional feature space by creating nonlinear combinations of the original features. For example, if \\mathbf{x} consists of 2 features: \\mathbf{x} = \\big[x_1 \\quad x_2\\big]^T \\quad \\quad \\mathbf{x} \\in I\\!R^d \\Downarrow \\phi \\mathbf{x}' = \\big[x_1 \\quad x_2 \\quad x_1 x_2 \\quad x_{1}^2 \\quad x_1 x_{2}^3 \\quad \\dots \\big]^T \\quad \\quad \\mathbf{x} \\in I\\!R^k (k >> d) Often, the mathematical definition of the RBF kernel is written and implemented as \\kappa(\\mathbf{x_i, x_j}) = \\exp\\bigg(- \\gamma \\; \\lVert\\mathbf{x_i - x_j }\\rVert^{2}_{2} \\bigg) where \\textstyle\\gamma = \\tfrac{1}{2\\sigma^2} is a free parameter that is to be optimized. Gaussian radial basis function (RBF) Kernel PCA In the linear PCA approach, we are interested in the principal components that maximize the variance in the dataset. This is done by extracting the eigenvectors (principal components) that correspond to the largest eigenvalues based on the covariance matrix: \\text{Cov} = \\frac{1}{N} \\sum_{i=1}^{N} \\mathbf{x_i} \\mathbf{x_i}^T Bernhard Scholkopf ( Kernel Principal Component Analysis [ 2 ]) generalized this approach for data that was mapped onto the higher dimensional space via a kernel function: \\text{Cov} = \\frac{1}{N} \\sum_{i=1}^{N} \\phi(\\mathbf{x_i}) \\phi(\\mathbf{x_i})^T However, in practice, the covariance matrix in the higher dimensional space is not calculated explicitly (kernel trick). Therefore, the implementation of RBF kernel PCA does not yield the principal component axes (in contrast to the standard PCA), but the obtained eigenvectors can be understood as projections of the data onto the principal components. RBF kernel PCA step-by-step 1. Computation of the kernel (similarity) matrix. In this first step, we need to calculate \\kappa(\\mathbf{x_i, x_j}) = \\exp\\bigg(- \\gamma \\; \\lVert\\mathbf{x_i - x_j }\\rVert^{2}_{2} \\bigg) for every pair of points. E.g., if we have a dataset of 100 samples, this step would result in a symmetric 100x100 kernel matrix. 2. Eigendecomposition of the kernel matrix. Since it is not guaranteed that the kernel matrix is centered, we can apply the following equation to do so: K' = K - \\mathbf{1_N} K - K \\mathbf{1_N} + \\mathbf{1_N} K \\mathbf{1_N} where \\mathbf{1_N} is (like the kernel matrix) a N\\times N matrix with all values equal to \\frac{1}{N} . [ 3 ] Now, we have to obtain the eigenvectors of the centered kernel matrix that correspond to the largest eigenvalues. Those eigenvectors are the data points already projected onto the respective principal components.
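These two steps translate almost directly into NumPy. The following is a from-scratch sketch for illustration (it assumes SciPy for the pairwise distances and is not mlxtend's implementation):

```python
import numpy as np
from scipy.spatial.distance import pdist, squareform

def rbf_kpca_sketch(X, gamma, n_components):
    # Step 1: pairwise squared Euclidean distances -> symmetric RBF kernel matrix
    K = np.exp(-gamma * squareform(pdist(X, 'sqeuclidean')))
    # Step 2: center the kernel matrix via K' = K - 1N K - K 1N + 1N K 1N
    N = K.shape[0]
    one_n = np.ones((N, N)) / N
    K = K - one_n @ K - K @ one_n + one_n @ K @ one_n
    # Eigenvectors for the largest eigenvalues are the projected data points
    e_vals, e_vecs = np.linalg.eigh(K)        # eigh returns ascending order
    return e_vecs[:, ::-1][:, :n_components]  # take the top n_components
```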
Projecting new data So far, so good: in the sections above, we have been projecting a dataset onto a new feature subspace. However, in a real application, we are usually interested in mapping new data points onto the same new feature subspace (e.g., if we are working with a training and a test dataset in pattern classification tasks). Remember, when we computed the eigenvectors \\mathbf{\\alpha} of the centered kernel matrix, those values were actually already the projected datapoints onto the principal component axis \\mathbf{g} . If we want to project a new data point \\mathbf{x} onto this principal component axis, we'd need to compute \\phi(\\mathbf{x})^T \\mathbf{g} . Fortunately, also here, we don't have to compute \\phi(\\mathbf{x})^T \\mathbf{g} explicitly but can use the kernel trick to calculate the RBF kernel between the new data point and every data point j in the training dataset: \\phi(\\mathbf{x})^T \\mathbf{g} = \\sum_j \\alpha_{j} \\; \\phi(\\mathbf{x}) \\; \\phi(\\mathbf{x_j})^T = \\sum_j \\alpha_{j} \\; \\kappa(\\mathbf{x}, \\mathbf{x_j}) Since the eigenvectors \\alpha and eigenvalues \\lambda of the kernel matrix \\mathbf{K} satisfy the equation \\mathbf{K} \\alpha = \\lambda \\alpha , we just need to normalize the eigenvectors by the corresponding eigenvalues. References [1] Q. Wang. Kernel principal component analysis and its applications in face recognition and active shape models . CoRR, abs/1207.3538, 2012. [2] B. Scholkopf, A. Smola, and K.-R. Muller. Kernel principal component analysis . pages 583\u2013588, 1997. [3] B. Scholkopf, A. Smola, and K.-R. Muller. Nonlinear component analysis as a kernel eigenvalue problem . Neural Computation, 10(5):1299\u20131319, 1998. Example 1 - Half-moon shapes We will start with a simple example of 2 half-moon shapes generated by the make_moons function from scikit-learn. import matplotlib.pyplot as plt from sklearn.datasets import make_moons X, y = make_moons(n_samples=50, random_state=1) plt.scatter(X[y==0, 0], X[y==0, 1], color='red', marker='o', alpha=0.5) plt.scatter(X[y==1, 0], X[y==1, 1], color='blue', marker='^', alpha=0.5) plt.ylabel('y coordinate') plt.xlabel('x coordinate') plt.show() Since the two half-moon shapes are linearly inseparable, we expect that the \u201cclassic\u201d PCA will fail to give us a \u201cgood\u201d representation of the data in 1D space. Let us use the PCA class to perform the dimensionality reduction. from mlxtend.feature_extraction import PrincipalComponentAnalysis as PCA pca = PCA(n_components=2) X_pca = pca.fit(X).transform(X) plt.scatter(X_pca[y==0, 0], X_pca[y==0, 1], color='red', marker='o', alpha=0.5) plt.scatter(X_pca[y==1, 0], X_pca[y==1, 1], color='blue', marker='^', alpha=0.5) plt.xlabel('PC1') plt.ylabel('PC2') plt.show() As we can see, the resulting principal components do not yield a subspace where the data is linearly separated well. Note that, in contrast to Linear Discriminant Analysis, PCA is an unsupervised method and does not \u201cconsider\u201d class labels in order to maximize the variance. Here, the colors blue and red are just added for visualization purposes to indicate the degree of separation. Next, we will perform dimensionality reduction via RBF kernel PCA on our half-moon data. The choice of \\gamma depends on the dataset and can be obtained via hyperparameter tuning techniques like Grid Search. Hyperparameter tuning is a broad topic itself, and here I will just use a \\gamma -value that I found to produce \u201cgood\u201d results. from mlxtend.feature_extraction import RBFKernelPCA as KPCA kpca = KPCA(gamma=15.0, n_components=2) kpca.fit(X) X_kpca = kpca.X_projected_ Please note that the components of kernel methods such as RBF kernel PCA already represent the projected data points (in contrast to PCA, where the component axes are the \"top k\" eigenvectors that are used to construct a projection matrix, which is then used to transform the training samples).
Thus, the projected training set is available after fitting via the .X_projected_ attribute. plt.scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='o', alpha=0.5) plt.scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='^', alpha=0.5) plt.title('First 2 principal components after RBF Kernel PCA') plt.xlabel('PC1') plt.ylabel('PC2') plt.show() The new feature space is linearly separable now. Since we are often interested in dimensionality reduction, let's have a look at the first component only. import numpy as np plt.scatter(X_kpca[y==0, 0], np.zeros((25, 1)), color='red', marker='o', alpha=0.5) plt.scatter(X_kpca[y==1, 0], np.zeros((25, 1)), color='blue', marker='^', alpha=0.5) plt.title('First principal component after RBF Kernel PCA') plt.xlabel('PC1') plt.yticks([]) plt.show() We can clearly see that the projection via RBF kernel PCA yielded a subspace where the classes are separated well. Such a subspace can then be used as input for generalized linear classification models, e.g., logistic regression. Projecting new data Finally, via the transform method, we can project new data onto the new component axes. import matplotlib.pyplot as plt from sklearn.datasets import make_moons X2, y2 = make_moons(n_samples=200, random_state=5) X2_kpca = kpca.transform(X2) plt.scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='o', alpha=0.5, label='fit data') plt.scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='^', alpha=0.5, label='fit data') plt.scatter(X2_kpca[y2==0, 0], X2_kpca[y2==0, 1], color='orange', marker='v', alpha=0.2, label='new data') plt.scatter(X2_kpca[y2==1, 0], X2_kpca[y2==1, 1], color='cyan', marker='s', alpha=0.2, label='new data') plt.legend() plt.show()
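For completeness, the kernel-trick projection described in the overview can also be sketched by hand. This is an illustration under the assumption that alphas holds the eigenvectors of the centered kernel matrix (as columns) and lambdas the corresponding eigenvalues; mlxtend's transform method handles this for you:

```python
import numpy as np

def project_new(x_new, X_fit, gamma, alphas, lambdas):
    # RBF kernel between the new point and every training point ...
    k = np.exp(-gamma * np.sum((X_fit - x_new) ** 2, axis=1))
    # ... then the kernel-trick projection, normalizing the eigenvectors
    # (columns of alphas) by their corresponding eigenvalues
    return k @ (alphas / lambdas)
```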
Example 2 - Concentric circles Following the concepts explained in Example 1, let's have a look at another classic case: 2 concentric circles with random noise produced by scikit-learn\u2019s make_circles . from sklearn.datasets import make_circles X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2) plt.figure(figsize=(8,6)) plt.scatter(X[y==0, 0], X[y==0, 1], color='red', alpha=0.5) plt.scatter(X[y==1, 0], X[y==1, 1], color='blue', alpha=0.5) plt.title('Concentric circles') plt.ylabel('y coordinate') plt.xlabel('x coordinate') plt.show() from mlxtend.feature_extraction import RBFKernelPCA as KPCA kpca = KPCA(gamma=15.0, n_components=2) kpca.fit(X) X_kpca = kpca.X_projected_ plt.scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='o', alpha=0.5) plt.scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='^', alpha=0.5) plt.title('First 2 principal components after RBF Kernel PCA') plt.xlabel('PC1') plt.ylabel('PC2') plt.show() plt.scatter(X_kpca[y==0, 0], np.zeros((500, 1)), color='red', marker='o', alpha=0.5) plt.scatter(X_kpca[y==1, 0], np.zeros((500, 1)), color='blue', marker='^', alpha=0.5) plt.title('First principal component after RBF Kernel PCA') plt.xlabel('PC1') plt.yticks([]) plt.show() API RBFKernelPCA(gamma=15.0, n_components=None, copy_X=True) RBF Kernel Principal Component Analysis for dimensionality reduction. Parameters gamma : float (default: 15.0) Free parameter (coefficient) of the RBF kernel. n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . copy_X : bool (default: True) Copies training data, which is required to compute the projection of new data via the transform method. Uses a reference to X if False. Attributes e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. X_projected_ : array-like, shape=[n_samples, n_components] Training samples projected along the component axes. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/RBFKernelPCA/ Methods fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object transform(X) Apply the non-linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"RBFKernelPCA"},
Let us call this nonlinear mapping function \\phi so that the mapping of a sample \\mathbf{x} can be written as \\mathbf{x} \\rightarrow \\phi (\\mathbf{x}) , which is called \"kernel function.\" Now, the term \"kernel\" describes a function that calculates the dot product of the images of the samples \\mathbf{x} under \\phi . \\kappa(\\mathbf{x_i, x_j}) = \\phi (\\mathbf{x_i}) \\phi (\\mathbf{x_j})^T More details about the derivation of this equation are provided in this excellent review article by Quan Wang: Kernel Principal Component Analysis and its Applications in Face Recognition and Active Shape Models .[ 1 ] In other words, the function \\phi maps the original d-dimensional features into a larger, k-dimensional feature space by creating nononlinear combinations of the original features. For example, if \\mathbf{x} consists of 2 features: \\mathbf{x} = \\big[x_1 \\quad x_2\\big]^T \\quad \\quad \\mathbf{x} \\in I\\!R^d \\Downarrow \\phi \\mathbf{x}' = \\big[x_1 \\quad x_2 \\quad x_1 x_2 \\quad x_{1}^2 \\quad x_1 x_{2}^3 \\quad \\dots \\big]^T \\quad \\quad \\mathbf{x} \\in I\\!R^k (k >> d) Often, the mathematical definition of the RBF kernel is written and implemented as \\kappa(\\mathbf{x_i, x_j}) = exp\\bigg(- \\gamma \\; \\lVert\\mathbf{x_i - x_j }\\rVert^{2}_{2} \\bigg) where \\textstyle\\gamma = \\tfrac{1}{2\\sigma^2} is a free parameter that is to be optimized.","title":"Kernel functions and the kernel trick"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#gaussian-radial-basis-function-rbf-kernel-pca","text":"In the linear PCA approach, we are interested in the principal components that maximize the variance in the dataset. This is done by extracting the eigenvectors (principle components) that correspond to the largest eigenvalues based on the covariance matrix: \\text{Cov} = \\frac{1}{N} \\sum_{i=1}^{N} \\mathbf{x_i} \\mathbf{x_i}^T Bernhard Scholkopf ( Kernel Principal Component Analysis [ 2 ]) generalized this approach for data that was mapped onto the higher dimensional space via a kernel function: \\text{Cov} = \\frac{1}{N} \\sum_{i=1}^{N} \\phi(\\mathbf{x_i}) \\phi(\\mathbf{x_i})^T However, in practice the the covariance matrix in the higher dimensional space is not calculated explicitly (kernel trick). Therefore, the implementation of RBF kernel PCA does not yield the principal component axes (in contrast to the standard PCA), but the obtained eigenvectors can be understood as projections of the data onto the principal components.","title":"Gaussian radial basis function (RBF) Kernel PCA"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#rbf-kernel-pca-step-by-step","text":"","title":"RBF kernel PCA step-by-step"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#1-computation-of-the-kernel-similarity-matrix","text":"In this first step, we need to calculate \\kappa(\\mathbf{x_i, x_j}) = exp\\bigg(- \\gamma \\; \\lVert\\mathbf{x_i - x_j }\\rVert^{2}_{2} \\bigg) for every pair of points. E.g., if we have a dataset of 100 samples, this step would result in a symmetric 100x100 kernel matrix.","title":"1. Computation of the kernel (similarity) matrix."},{"location":"user_guide/feature_extraction/RBFKernelPCA/#2-eigendecomposition-of-the-kernel-matrix","text":"Since it is not guaranteed that the kernel matrix is centered, we can apply the following equation to do so: K' = K - \\mathbf{1_N} K - K \\mathbf{1_N} + \\mathbf{1_N} K \\mathbf{1_N} where \\mathbf{1_N} is (like the kernel matrix) a N\\times N matrix with all values equal to \\frac{1}{N} . 
[ 3 ] Now, we have to obtain the eigenvectors of the centered kernel matrix that correspond to the largest eigenvalues. Those eigenvectors are the data points already projected onto the respective principal components.","title":"2. Eigendecomposition of the kernel matrix."},{"location":"user_guide/feature_extraction/RBFKernelPCA/#projecting-new-data","text":"So far, so good, in the sections above, we have been projecting an dataset onto a new feature subspace. However, in a real application, we are usually interested in mapping new data points onto the same new feature subspace (e.g., if are working with a training and a test dataset in pattern classification tasks). Remember, when we computed the eigenvectors \\mathbf{\\alpha} of the centered kernel matrix, those values were actually already the projected datapoints onto the principal component axis \\mathbf{g} . If we want to project a new data point \\mathbf{x} onto this principal component axis, we'd need to compute \\phi(\\mathbf{x})^T \\mathbf{g} . Fortunately, also here, we don't have to compute \\phi(\\mathbf{x})^T \\mathbf{g} explicitely but use the kernel trick to calculate the RBF kernel between the new data point and every data point j in the training dataset: \\phi(\\mathbf{x})^T \\mathbf{g} = \\sum_j \\alpha_{i} \\; \\phi(\\mathbf{x}) \\; \\phi(\\mathbf{x_j})^T = \\sum_j \\alpha_{i} \\; \\kappa(\\mathbf{x}, \\mathbf{x_j}) and the eigenvectors \\alpha and eigenvalues \\lambda of the Kernel matrix \\mathbf{K} satisfy the equation \\mathbf{K} \\alpha = \\lambda \\alpha , we just need to normalize the eigenvector by the corresponding eigenvalue.","title":"Projecting new data"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#references","text":"[1] Q. Wang. Kernel principal component analysis and its applications in face recognition and active shape models . CoRR, abs/1207.3538, 2012. [2] B. Scholkopf, A. Smola, and K.-R. Muller. Kernel principal component analysis . pages 583\u2013588, 1997. [3] B. Scholkopf, A. Smola, and K.-R. Muller. Nonlinear component analysis as a kernel eigenvalue problem . Neural computation, 10(5):1299\u20131319, 1998.","title":"References"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#example-1-half-moon-shapes","text":"We will start with a simple example of 2 half-moon shapes generated by the make_moons function from scikit-learn. import matplotlib.pyplot as plt from sklearn.datasets import make_moons X, y = make_moons(n_samples=50, random_state=1) plt.scatter(X[y==0, 0], X[y==0, 1], color='red', marker='o', alpha=0.5) plt.scatter(X[y==1, 0], X[y==1, 1], color='blue', marker='^', alpha=0.5) plt.ylabel('y coordinate') plt.xlabel('x coordinate') plt.show() Since the two half-moon shapes are linearly inseparable, we expect that the \u201cclassic\u201d PCA will fail to give us a \u201cgood\u201d representation of the data in 1D space. Let us use PCA class to perform the dimensionality reduction. from mlxtend.feature_extraction import PrincipalComponentAnalysis as PCA pca = PCA(n_components=2) X_pca = pca.fit(X).transform(X) plt.scatter(X_pca[y==0, 0], X_pca[y==0, 1], color='red', marker='o', alpha=0.5) plt.scatter(X_pca[y==1, 0], X_pca[y==1, 1], color='blue', marker='^', alpha=0.5) plt.xlabel('PC1') plt.ylabel('PC2') plt.show() As we can see, the resulting principal components do not yield a subspace where the data is linearly separated well. 
Note that PCA is a unsupervised method and does not \u201cconsider\u201d class labels in order to maximize the variance in contrast to Linear Discriminant Analysis. Here, the colors blue and red are just added for visualization purposes to indicate the degree of separation. Next, we will perform dimensionality reduction via RBF kernel PCA on our half-moon data. The choice of \\gamma depends on the dataset and can be obtained via hyperparameter tuning techniques like Grid Search. Hyperparameter tuning is a broad topic itself, and here I will just use a \\gamma -value that I found to produce \u201cgood\u201d results. from mlxtend.data import iris_data from mlxtend.preprocessing import standardize from mlxtend.feature_extraction import RBFKernelPCA as KPCA kpca = KPCA(gamma=15.0, n_components=2) kpca.fit(X) X_kpca = kpca.X_projected_ Please note that the components of kernel methods such as RBF kernel PCA already represent the projected data points (in contrast to PCA, where the component axis are the \"top k\" eigenvectors thar are used to contruct a projection matrix, which is then used to transform the training samples). Thus, the projected training set is available after fitting via the .X_projected_ attribute. plt.scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='o', alpha=0.5) plt.scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='^', alpha=0.5) plt.title('First 2 principal components after RBF Kernel PCA') plt.xlabel('PC1') plt.ylabel('PC2') plt.show() The new feature space is linearly separable now. Since we are often interested in dimensionality reduction, let's have a look at the first component only. import numpy as np plt.scatter(X_kpca[y==0, 0], np.zeros((25, 1)), color='red', marker='o', alpha=0.5) plt.scatter(X_kpca[y==1, 0], np.zeros((25, 1)), color='blue', marker='^', alpha=0.5) plt.title('First principal component after RBF Kernel PCA') plt.xlabel('PC1') plt.yticks([]) plt.show() We can clearly see that the projection via RBF kernel PCA yielded a subspace where the classes are separated well. Such a subspace can then be used as input for generalized linear classification models, e.g., logistic regression.","title":"Example 1 - Half-moon shapes"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#projecting-new-data_1","text":"Finally, via the transform method, we can project new data onto the new component axes. import matplotlib.pyplot as plt from sklearn.datasets import make_moons X2, y2 = make_moons(n_samples=200, random_state=5) X2_kpca = kpca.transform(X2) plt.scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='o', alpha=0.5, label='fit data') plt.scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='^', alpha=0.5, label='fit data') plt.scatter(X2_kpca[y2==0, 0], X2_kpca[y2==0, 1], color='orange', marker='v', alpha=0.2, label='new data') plt.scatter(X2_kpca[y2==1, 0], X2_kpca[y2==1, 1], color='cyan', marker='s', alpha=0.2, label='new data') plt.legend() plt.show()","title":"Projecting new data"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#example-2-concentric-circles","text":"Following the concepts explained in example 1, let's have a look at another classic case: 2 concentric circles with random noise produced by scikit-learn\u2019s make_circles . 
from sklearn.datasets import make_circles X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2) plt.figure(figsize=(8,6)) plt.scatter(X[y==0, 0], X[y==0, 1], color='red', alpha=0.5) plt.scatter(X[y==1, 0], X[y==1, 1], color='blue', alpha=0.5) plt.title('Concentric circles') plt.ylabel('y coordinate') plt.xlabel('x coordinate') plt.show() from mlxtend.feature_extraction import RBFKernelPCA as KPCA kpca = KPCA(gamma=15.0, n_components=2) kpca.fit(X) X_kpca = kpca.X_projected_ plt.scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='o', alpha=0.5) plt.scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='^', alpha=0.5) plt.title('First 2 principal components after RBF Kernel PCA') plt.xlabel('PC1') plt.ylabel('PC2') plt.show() plt.scatter(X_kpca[y==0, 0], np.zeros((500, 1)), color='red', marker='o', alpha=0.5) plt.scatter(X_kpca[y==1, 0], np.zeros((500, 1)), color='blue', marker='^', alpha=0.5) plt.title('First principal component after RBF Kernel PCA') plt.xlabel('PC1') plt.yticks([]) plt.show()","title":"Example 2 - Concentric circles"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#api","text":"RBFKernelPCA(gamma=15.0, n_components=None, copy_X=True) RBF Kernel Principal Component Analysis for dimensionality reduction. Parameters gamma : float (default: 15.0) Free parameter (coefficient) of the RBF kernel. n_components : int (default: None) The number of principal components for transformation. Keeps the original dimensions of the dataset if None . copy_X : bool (default: True) Copies training data, which is required to compute the projection of new data via the transform method. Uses a reference to X if False. Attributes e_vals_ : array-like, shape=[n_features] Eigenvalues in sorted order. e_vecs_ : array-like, shape=[n_features] Eigenvectors in sorted order. X_projected_ : array-like, shape=[n_samples, n_components] Training samples projected along the component axes. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_extraction/RBFKernelPCA/","title":"API"},{"location":"user_guide/feature_extraction/RBFKernelPCA/#methods","text":"fit(X) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns self : object transform(X) Apply the non-linear transformation on X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_projected : np.ndarray, shape = [n_samples, n_components] Projected training vectors.","title":"Methods"},{"location":"user_guide/feature_selection/ColumnSelector/","text":"ColumnSelector Implementation of a column selector class for scikit-learn pipelines. from mlxtend.feature_selection import ColumnSelector Overview The ColumnSelector can be used for \"manual\" feature selection, e.g., as part of a grid search via a scikit-learn pipeline. References - Example 1 - Fitting an Estimator on a Feature Subset Load a simple benchmark dataset: from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target The ColumnSelector is a simple transformer class that selects specific columns (features) from a dataset.
For instance, using the transform method returns a reduced dataset that only contains two features (here: the first two features via the indices 0 and 1, respectively): from mlxtend.feature_selection import ColumnSelector col_selector = ColumnSelector(cols=(0, 1)) # col_selector.fit(X) # optional, does not do anything col_selector.transform(X).shape (150, 2) ColumnSelector works both with numpy arrays and pandas dataframes: import pandas as pd iris_df = pd.DataFrame(iris.data, columns=iris.feature_names) iris_df.head() sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) 0 5.1 3.5 1.4 0.2 1 4.9 3.0 1.4 0.2 2 4.7 3.2 1.3 0.2 3 4.6 3.1 1.5 0.2 4 5.0 3.6 1.4 0.2 col_selector = ColumnSelector(cols=(\"sepal length (cm)\", \"sepal width (cm)\")) col_selector.transform(iris_df).shape (150, 2) Similarly, we can use the ColumnSelector as part of a scikit-learn Pipeline : from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.pipeline import make_pipeline pipe = make_pipeline(StandardScaler(), ColumnSelector(cols=(0, 1)), KNeighborsClassifier()) pipe.fit(X, y) pipe.score(X, y) 0.84 Example 2 - Feature Selection via GridSearch Example 1 showed a simple usage example of the ColumnSelector ; however, selecting columns from a dataset is trivial and does not require a specific transformer class since we could have achieved the same results via classifier.fit(X[:, :2], y) classifier.score(X[:, :2], y) However, the ColumnSelector becomes really useful for feature selection as part of a grid search as shown in this example. Load a simple benchmark dataset: from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target Create all possible combinations: from itertools import combinations all_comb = [] for size in range(1, 5): all_comb += list(combinations(range(X.shape[1]), r=size)) print(all_comb) [(0,), (1,), (2,), (3,), (0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3), (0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3), (0, 1, 2, 3)] Feature and model selection via grid search: from mlxtend.feature_selection import ColumnSelector from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.model_selection import GridSearchCV from sklearn.pipeline import make_pipeline pipe = make_pipeline(StandardScaler(), ColumnSelector(), KNeighborsClassifier()) param_grid = {'columnselector__cols': all_comb, 'kneighborsclassifier__n_neighbors': list(range(1, 11))} grid = GridSearchCV(pipe, param_grid, cv=5, n_jobs=-1) grid.fit(X, y) print('Best parameters:', grid.best_params_) print('Best performance:', grid.best_score_) Best parameters: {'columnselector__cols': (2, 3), 'kneighborsclassifier__n_neighbors': 1} Best performance: 0.98 Example 3 -- Scaling of a Subset of Features in a scikit-learn Pipeline The following example illustrates how we could use the ColumnSelector in tandem with scikit-learn's FeatureUnion to only scale certain features (in this toy example: the first and second feature only) in a dataset in a Pipeline .
from mlxtend.feature_selection import ColumnSelector from sklearn.pipeline import make_pipeline from sklearn.pipeline import Pipeline from sklearn.pipeline import FeatureUnion from sklearn.preprocessing import MinMaxScaler from sklearn.neighbors import KNeighborsClassifier from mlxtend.data import iris_data X, y = iris_data() scale_pipe = make_pipeline(ColumnSelector(cols=(0, 1)), MinMaxScaler()) pipeline = Pipeline([ ('feats', FeatureUnion([ ('col_1-2', scale_pipe), ('col_3-4', ColumnSelector(cols=(2, 3))) ])), ('clf', KNeighborsClassifier()) ]) pipeline.fit(X, y) Pipeline(memory=None, steps=[('feats', FeatureUnion(n_jobs=None, transformer_list=[('col_1-2', Pipeline(memory=None, steps=[('columnselector', ColumnSelector(cols=(0, 1), drop_axis=False)), ('minmaxscaler', MinMaxScaler(copy=True, feature_range=(0, 1)))])), ('col_3-4', ColumnSelector(cols=(2, 3), drop_axis=Fa...ki', metric_params=None, n_jobs=None, n_neighbors=5, p=2, weights='uniform'))]) API ColumnSelector(cols=None, drop_axis=False) Object for selecting specific columns from a data set. Parameters cols : array-like (default: None) A list specifying the feature indices to be selected. For example, [1, 4, 5] to select the 2nd, 5th, and 6th feature columns. If None, returns all columns in the array. drop_axis : bool (default=False) Drops the last axis if True and only one column is selected. This is useful, e.g., when the ColumnSelector is used for selecting only one column and the resulting array should be fed to e.g., a scikit-learn column selector. E.g., instead of returning an array with shape (n_samples, 1), drop_axis=True will return an array with shape (n_samples,). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ColumnSelector/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features.
y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features","title":"ColumnSelector"},{"location":"user_guide/feature_selection/ColumnSelector/#columnselector","text":"Implementation of a column selector class for scikit-learn pipelines. from mlxtend.feature_selection import ColumnSelector","title":"ColumnSelector"},{"location":"user_guide/feature_selection/ColumnSelector/#overview","text":"The ColumnSelector can be used for \"manual\" feature selection, e.g., as part of a grid search via a scikit-learn pipeline.","title":"Overview"},{"location":"user_guide/feature_selection/ColumnSelector/#references","text":"-","title":"References"},{"location":"user_guide/feature_selection/ColumnSelector/#example-1-fitting-an-estimator-on-a-feature-subset","text":"Load a simple benchmark dataset: from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target The ColumnSelector is a simple transformer class that selects specific columns (features) from a dataset. For instance, using the transform method returns a reduced dataset that only contains two features (here: the first two features via the indices 0 and 1, respectively): from mlxtend.feature_selection import ColumnSelector col_selector = ColumnSelector(cols=(0, 1)) # col_selector.fit(X) # optional, does not do anything col_selector.transform(X).shape (150, 2) ColumnSelector works both with numpy arrays and pandas dataframes: import pandas as pd iris_df = pd.DataFrame(iris.data, columns=iris.feature_names) iris_df.head() sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) 0 5.1 3.5 1.4 0.2 1 4.9 3.0 1.4 0.2 2 4.7 3.2 1.3 0.2 3 4.6 3.1 1.5 0.2 4 5.0 3.6 1.4 0.2 col_selector = ColumnSelector(cols=(\"sepal length (cm)\", \"sepal width (cm)\")) col_selector.transform(iris_df).shape (150, 2) Similarly, we can use the ColumnSelector as part of a scikit-learn Pipeline : from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.pipeline import make_pipeline pipe = make_pipeline(StandardScaler(), ColumnSelector(cols=(0, 1)), KNeighborsClassifier()) pipe.fit(X, y) pipe.score(X, y) 0.84","title":"Example 1 - Fitting an Estimator on a Feature Subset"},{"location":"user_guide/feature_selection/ColumnSelector/#example-2-feature-selection-via-gridsearch","text":"Example 1 showed a simple usage example of the ColumnSelector ; however, selecting columns from a dataset is trivial and does not require a specific transformer class since we could have achieved the same results via classifier.fit(X[:, :2], y) classifier.score(X[:, :2], y) However, the ColumnSelector becomes really useful for feature selection as part of a grid search as shown in this example.
Load a simple benchmark dataset: from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target Create all possible combinations: from itertools import combinations all_comb = [] for size in range(1, 5): all_comb += list(combinations(range(X.shape[1]), r=size)) print(all_comb) [(0,), (1,), (2,), (3,), (0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3), (0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3), (0, 1, 2, 3)] Feature and model selection via grid search: from mlxtend.feature_selection import ColumnSelector from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.model_selection import GridSearchCV from sklearn.pipeline import make_pipeline pipe = make_pipeline(StandardScaler(), ColumnSelector(), KNeighborsClassifier()) param_grid = {'columnselector__cols': all_comb, 'kneighborsclassifier__n_neighbors': list(range(1, 11))} grid = GridSearchCV(pipe, param_grid, cv=5, n_jobs=-1) grid.fit(X, y) print('Best parameters:', grid.best_params_) print('Best performance:', grid.best_score_) Best parameters: {'columnselector__cols': (2, 3), 'kneighborsclassifier__n_neighbors': 1} Best performance: 0.98","title":"Example 2 - Feature Selection via GridSearch"},{"location":"user_guide/feature_selection/ColumnSelector/#example-3-scaling-of-a-subset-of-features-in-a-scikit-learn-pipeline","text":"The following example illustrates how we could use the ColumnSelector in tandem with scikit-learn's FeatureUnion to only scale certain features (in this toy example: the first and second feature only) in a dataset in a Pipeline . from mlxtend.feature_selection import ColumnSelector from sklearn.pipeline import make_pipeline from sklearn.pipeline import Pipeline from sklearn.pipeline import FeatureUnion from sklearn.preprocessing import MinMaxScaler from sklearn.neighbors import KNeighborsClassifier from mlxtend.data import iris_data X, y = iris_data() scale_pipe = make_pipeline(ColumnSelector(cols=(0, 1)), MinMaxScaler()) pipeline = Pipeline([ ('feats', FeatureUnion([ ('col_1-2', scale_pipe), ('col_3-4', ColumnSelector(cols=(2, 3))) ])), ('clf', KNeighborsClassifier()) ]) pipeline.fit(X, y) Pipeline(memory=None, steps=[('feats', FeatureUnion(n_jobs=None, transformer_list=[('col_1-2', Pipeline(memory=None, steps=[('columnselector', ColumnSelector(cols=(0, 1), drop_axis=False)), ('minmaxscaler', MinMaxScaler(copy=True, feature_range=(0, 1)))])), ('col_3-4', ColumnSelector(cols=(2, 3), drop_axis=Fa...ki', metric_params=None, n_jobs=None, n_neighbors=5, p=2, weights='uniform'))])","title":"Example 3 -- Scaling of a Subset of Features in a scikit-learn Pipeline"},{"location":"user_guide/feature_selection/ColumnSelector/#api","text":"ColumnSelector(cols=None, drop_axis=False) Object for selecting specific columns from a data set. Parameters cols : array-like (default: None) A list specifying the feature indices to be selected. For example, [1, 4, 5] to select the 2nd, 5th, and 6th feature columns. If None, returns all columns in the array. drop_axis : bool (default=False) Drops the last axis if True and only one column is selected. This is useful, e.g., when the ColumnSelector is used for selecting only one column and the resulting array should be fed to e.g., a scikit-learn column selector. E.g., instead of returning an array with shape (n_samples, 1), drop_axis=True will return an array with shape (n_samples,).
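A small sketch (not in the original docs) demonstrating the drop_axis behavior described above on the iris data:

from sklearn.datasets import load_iris
from mlxtend.feature_selection import ColumnSelector

X = load_iris().data
# selecting a single column keeps the trailing axis by default
print(ColumnSelector(cols=(0,)).transform(X).shape)                  # (150, 1)
# drop_axis=True flattens the result to a 1D array
print(ColumnSelector(cols=(0,), drop_axis=True).transform(X).shape)  # (150,)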
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ColumnSelector/","title":"API"},{"location":"user_guide/feature_selection/ColumnSelector/#methods","text":"fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a slice of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_slice : shape = [n_samples, k_features] Subset of the feature space where k_features <= n_features","title":"Methods"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/","text":"Exhaustive Feature Selector Implementation of an exhaustive feature selector for sampling and evaluating all possible feature combinations in a specified range. from mlxtend.feature_selection import ExhaustiveFeatureSelector Overview This exhaustive feature selection algorithm is a wrapper approach for brute-force evaluation of feature subsets; the best subset is selected by optimizing a specified performance metric given an arbitrary regressor or classifier. For instance, if the classifier is a logistic regression and the dataset consists of 4 features, the algorithm will evaluate all 15 feature combinations (if min_features=1 and max_features=4 ) {0} {1} {2} {3} {0, 1} {0, 2} {0, 3} {1, 2} {1, 3} {2, 3} {0, 1, 2} {0, 1, 3} {0, 2, 3} {1, 2, 3} {0, 1, 2, 3} and select the one that results in the best performance (e.g., classification accuracy) of the logistic regression classifier.
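As a quick sanity check (a sketch, not part of the original docs), the number of subsets the exhaustive search evaluates is the sum of binomial coefficients over the allowed subset sizes; for 4 features with min_features=1 and max_features=4 this gives 15, matching the enumeration above:

from itertools import combinations

n_features, min_features, max_features = 4, 1, 4
# count all subsets of size min_features..max_features
n_subsets = sum(1 for k in range(min_features, max_features + 1)
                for _ in combinations(range(n_features), k))
print(n_subsets)  # 15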
Example 1 - A simple Iris Example Initializing a simple classifier from scikit-learn: from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=3) efs1 = EFS(knn, min_features=1, max_features=4, scoring='accuracy', print_progress=True, cv=5) efs1 = efs1.fit(X, y) print('Best accuracy score: %.2f' % efs1.best_score_) print('Best subset (indices):', efs1.best_idx_) print('Best subset (corresponding names):', efs1.best_feature_names_) Features: 15/15 Best accuracy score: 0.97 Best subset (indices): (0, 2, 3) Best subset (corresponding names): ('0', '2', '3') Note that in the example above, the 'best_feature_names_' are simply a string equivalent of the feature indices. However, we can provide custom feature names to the fit function for this mapping: feature_names = ('sepal length', 'sepal width', 'petal length', 'petal width') efs1 = efs1.fit(X, y, custom_feature_names=feature_names) print('Best subset (corresponding names):', efs1.best_feature_names_) Features: 15/15 Best subset (corresponding names): ('sepal length', 'petal length', 'petal width') Via the subsets_ attribute, we can take a look at the selected feature indices at each step: efs1.subsets_ {0: {'avg_score': 0.65999999999999992, 'cv_scores': array([ 0.53333333, 0.63333333, 0.73333333, 0.76666667, 0.63333333]), 'feature_idx': (0,), 'feature_names': ('sepal length',)}, 1: {'avg_score': 0.56666666666666665, 'cv_scores': array([ 0.53333333, 0.63333333, 0.6 , 0.5 , 0.56666667]), 'feature_idx': (1,), 'feature_names': ('sepal width',)}, 2: {'avg_score': 0.95333333333333337, 'cv_scores': array([ 0.93333333, 1. , 0.9 , 0.93333333, 1. ]), 'feature_idx': (2,), 'feature_names': ('petal length',)}, 3: {'avg_score': 0.94666666666666666, 'cv_scores': array([ 0.96666667, 0.96666667, 0.93333333, 0.86666667, 1. ]), 'feature_idx': (3,), 'feature_names': ('petal width',)}, 4: {'avg_score': 0.72666666666666668, 'cv_scores': array([ 0.66666667, 0.8 , 0.63333333, 0.86666667, 0.66666667]), 'feature_idx': (0, 1), 'feature_names': ('sepal length', 'sepal width')}, 5: {'avg_score': 0.94666666666666666, 'cv_scores': array([ 0.96666667, 1. , 0.86666667, 0.93333333, 0.96666667]), 'feature_idx': (0, 2), 'feature_names': ('sepal length', 'petal length')}, 6: {'avg_score': 0.95333333333333337, 'cv_scores': array([ 0.96666667, 0.96666667, 0.9 , 0.93333333, 1. ]), 'feature_idx': (0, 3), 'feature_names': ('sepal length', 'petal width')}, 7: {'avg_score': 0.94666666666666666, 'cv_scores': array([ 0.96666667, 1. , 0.9 , 0.93333333, 0.93333333]), 'feature_idx': (1, 2), 'feature_names': ('sepal width', 'petal length')}, 8: {'avg_score': 0.94000000000000006, 'cv_scores': array([ 0.96666667, 0.96666667, 0.86666667, 0.93333333, 0.96666667]), 'feature_idx': (1, 3), 'feature_names': ('sepal width', 'petal width')}, 9: {'avg_score': 0.95333333333333337, 'cv_scores': array([ 0.96666667, 0.96666667, 0.9 , 0.93333333, 1. ]), 'feature_idx': (2, 3), 'feature_names': ('petal length', 'petal width')}, 10: {'avg_score': 0.94000000000000006, 'cv_scores': array([ 0.96666667, 0.96666667, 0.86666667, 0.93333333, 0.96666667]), 'feature_idx': (0, 1, 2), 'feature_names': ('sepal length', 'sepal width', 'petal length')}, 11: {'avg_score': 0.94666666666666666, 'cv_scores': array([ 0.93333333, 0.96666667, 0.9 , 0.93333333, 1. 
]), 'feature_idx': (0, 1, 3), 'feature_names': ('sepal length', 'sepal width', 'petal width')}, 12: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.96666667, 0.96666667, 0.96666667, 0.96666667, 1. ]), 'feature_idx': (0, 2, 3), 'feature_names': ('sepal length', 'petal length', 'petal width')}, 13: {'avg_score': 0.95999999999999996, 'cv_scores': array([ 0.96666667, 0.96666667, 0.93333333, 0.93333333, 1. ]), 'feature_idx': (1, 2, 3), 'feature_names': ('sepal width', 'petal length', 'petal width')}, 14: {'avg_score': 0.96666666666666679, 'cv_scores': array([ 0.96666667, 0.96666667, 0.93333333, 0.96666667, 1. ]), 'feature_idx': (0, 1, 2, 3), 'feature_names': ('sepal length', 'sepal width', 'petal length', 'petal width')}} Example 2 - Visualizing the feature selection results For our convenience, we can visualize the output from the feature selection in a pandas DataFrame format using the get_metric_dict method of the ExhaustiveFeatureSelector object. The columns std_dev and std_err represent the standard deviation and standard errors of the cross-validation scores, respectively. Below, we see the DataFrame of the Exhaustive Feature Selector: import pandas as pd iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=3) efs1 = EFS(knn, min_features=1, max_features=4, scoring='accuracy', print_progress=True, cv=5) feature_names = ('sepal length', 'sepal width', 'petal length', 'petal width') efs1 = efs1.fit(X, y, custom_feature_names=feature_names) df = pd.DataFrame.from_dict(efs1.get_metric_dict()).T df.sort_values('avg_score', inplace=True, ascending=False) df Features: 15/15 avg_score ci_bound cv_scores feature_idx feature_names std_dev std_err 12 0.973333 0.0171372 [0.966666666667, 0.966666666667, 0.96666666666... (0, 2, 3) (sepal length, petal length, petal width) 0.0133333 0.00666667 14 0.966667 0.0270963 [0.966666666667, 0.966666666667, 0.93333333333... (0, 1, 2, 3) (sepal length, sepal width, petal length, peta... 0.0210819 0.0105409 13 0.96 0.0320608 [0.966666666667, 0.966666666667, 0.93333333333... (1, 2, 3) (sepal width, petal length, petal width) 0.0249444 0.0124722 2 0.953333 0.0514116 [0.933333333333, 1.0, 0.9, 0.933333333333, 1.0] (2,) (petal length,) 0.04 0.02 6 0.953333 0.0436915 [0.966666666667, 0.966666666667, 0.9, 0.933333... (0, 3) (sepal length, petal width) 0.0339935 0.0169967 9 0.953333 0.0436915 [0.966666666667, 0.966666666667, 0.9, 0.933333... (2, 3) (petal length, petal width) 0.0339935 0.0169967 3 0.946667 0.0581151 [0.966666666667, 0.966666666667, 0.93333333333... (3,) (petal width,) 0.0452155 0.0226078 5 0.946667 0.0581151 [0.966666666667, 1.0, 0.866666666667, 0.933333... (0, 2) (sepal length, petal length) 0.0452155 0.0226078 7 0.946667 0.0436915 [0.966666666667, 1.0, 0.9, 0.933333333333, 0.9... (1, 2) (sepal width, petal length) 0.0339935 0.0169967 11 0.946667 0.0436915 [0.933333333333, 0.966666666667, 0.9, 0.933333... (0, 1, 3) (sepal length, sepal width, petal width) 0.0339935 0.0169967 8 0.94 0.0499631 [0.966666666667, 0.966666666667, 0.86666666666... (1, 3) (sepal width, petal width) 0.038873 0.0194365 10 0.94 0.0499631 [0.966666666667, 0.966666666667, 0.86666666666... (0, 1, 2) (sepal length, sepal width, petal length) 0.038873 0.0194365 4 0.726667 0.11623 [0.666666666667, 0.8, 0.633333333333, 0.866666...
(0, 1) (sepal length, sepal width) 0.0904311 0.0452155 0 0.66 0.106334 [0.533333333333, 0.633333333333, 0.73333333333... (0,) (sepal length,) 0.0827312 0.0413656 1 0.566667 0.0605892 [0.533333333333, 0.633333333333, 0.6, 0.5, 0.5... (1,) (sepal width,) 0.0471405 0.0235702 import matplotlib.pyplot as plt metric_dict = efs1.get_metric_dict() fig = plt.figure() k_feat = sorted(metric_dict.keys()) avg = [metric_dict[k]['avg_score'] for k in k_feat] upper, lower = [], [] for k in k_feat: upper.append(metric_dict[k]['avg_score'] + metric_dict[k]['std_dev']) lower.append(metric_dict[k]['avg_score'] - metric_dict[k]['std_dev']) plt.fill_between(k_feat, upper, lower, alpha=0.2, color='blue', lw=1) plt.plot(k_feat, avg, color='blue', marker='o') plt.ylabel('Accuracy +/- Standard Deviation') plt.xlabel('Number of Features') feature_min = len(metric_dict[k_feat[0]]['feature_idx']) feature_max = len(metric_dict[k_feat[-1]]['feature_idx']) plt.xticks(k_feat, [str(metric_dict[k]['feature_names']) for k in k_feat], rotation=90) plt.show() Example 3 - Exhaustive Feature Selection for Regression Similar to the classification examples above, the ExhaustiveFeatureSelector also supports scikit-learn's estimators for regression. from sklearn.linear_model import LinearRegression from sklearn.datasets import load_boston boston = load_boston() X, y = boston.data, boston.target lr = LinearRegression() efs = EFS(lr, min_features=10, max_features=12, scoring='neg_mean_squared_error', cv=10) efs.fit(X, y) print('Best MSE score: %.2f' % (efs.best_score_ * (-1))) print('Best subset:', efs.best_idx_) Features: 377/377 Best subset: (0, 1, 4, 6, 7, 8, 9, 10, 11, 12) Example 4 - Using the Selected Feature Subset For Making New Predictions # Initialize the dataset from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=1) knn = KNeighborsClassifier(n_neighbors=3) # Select the \"best\" feature subset via # 5-fold cross-validation on the training set. from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS efs1 = EFS(knn, min_features=1, max_features=4, scoring='accuracy', cv=5) efs1 = efs1.fit(X_train, y_train) Features: 15/15 print('Selected features:', efs1.best_idx_) Selected features: (2, 3) # Generate the new subsets based on the selected features # Note that the transform call is equivalent to # X_train[:, efs1.best_idx_] X_train_efs = efs1.transform(X_train) X_test_efs = efs1.transform(X_test) # Fit the estimator using the new feature subset # and make a prediction on the test data knn.fit(X_train_efs, y_train) y_pred = knn.predict(X_test_efs) # Compute the accuracy of the prediction acc = float((y_test == y_pred).sum()) / y_pred.shape[0] print('Test set accuracy: %.2f %%' % (acc*100)) Test set accuracy: 96.00 % Example 5 - Exhaustive Feature Selection and GridSearch # Initialize the dataset from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=1) Use scikit-learn's GridSearch to tune the hyperparameters of the LogisticRegression estimator inside the ExhaustiveFeatureSelector and use it for prediction in the pipeline. Note that the clone_estimator attribute needs to be set to False .
from sklearn.model_selection import GridSearchCV from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS lr = LogisticRegression(multi_class='multinomial', solver='lbfgs', random_state=123) efs1 = EFS(estimator=lr, min_features=2, max_features=3, scoring='accuracy', print_progress=False, clone_estimator=False, cv=5, n_jobs=1) pipe = make_pipeline(efs1, lr) param_grid = {'exhaustivefeatureselector__estimator__C': [0.1, 1.0, 10.0]} gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', n_jobs=1, cv=2, verbose=1, refit=False) # run gridsearch gs = gs.fit(X_train, y_train) Fitting 2 folds for each of 3 candidates, totalling 6 fits [Parallel(n_jobs=1)]: Done 6 out of 6 | elapsed: 2.7s finished ... and the \"best\" parameters determined by GridSearch are ... print(\"Best parameters via GridSearch\", gs.best_params_) Best parameters via GridSearch {'exhaustivefeatureselector__estimator__C': 1.0} Obtaining the best k feature indices after GridSearch If we are interested in the best k feature indices via ExhaustiveFeatureSelector.best_idx_ , we have to initialize a GridSearchCV object with refit=True . Now, the grid search object will take the complete training dataset and the best parameters, which it found via cross-validation, to train the estimator pipeline. gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', n_jobs=1, cv=2, verbose=1, refit=True) After running the grid search, we can access the individual pipeline objects of the best_estimator_ via the steps attribute. gs = gs.fit(X_train, y_train) gs.best_estimator_.steps Fitting 2 folds for each of 3 candidates, totalling 6 fits [Parallel(n_jobs=1)]: Done 6 out of 6 | elapsed: 2.9s finished [('exhaustivefeatureselector', ExhaustiveFeatureSelector(clone_estimator=False, cv=5, estimator=LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='multinomial', n_jobs=1, penalty='l2', random_state=123, solver='lbfgs', tol=0.0001, verbose=0, warm_start=False), max_features=3, min_features=2, n_jobs=1, pre_dispatch='2*n_jobs', print_progress=False, scoring='accuracy')), ('logisticregression', LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='multinomial', n_jobs=1, penalty='l2', random_state=123, solver='lbfgs', tol=0.0001, verbose=0, warm_start=False))] Via sub-indexing, we can then obtain the best-selected feature subset: print('Best features:', gs.best_estimator_.steps[0][1].best_idx_) Best features: (2, 3) During cross-validation, this feature combination had a CV accuracy of: print('Best score:', gs.best_score_) Best score: 0.97 gs.best_params_ {'exhaustivefeatureselector__estimator__C': 1.0} Alternatively , we can set the \"best grid search parameters\" in our pipeline manually if we ran GridSearchCV with refit=False . It should yield the same results: pipe.set_params(**gs.best_params_).fit(X_train, y_train) print('Best features:', pipe.steps[0][1].best_idx_) Best features: (2, 3) Example 6 - Working with pandas DataFrames Optionally, we can also use pandas DataFrames and pandas Series as input to the fit function. In this case, the column names of the pandas DataFrame will be used as feature names.
However, note that if custom_feature_names are provided in the fit function, these custom_feature_names take precedence over the DataFrame column-based feature names. import pandas as pd from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris iris = load_iris() col_names = ('sepal length', 'sepal width', 'petal length', 'petal width') X_df = pd.DataFrame(iris.data, columns=col_names) y_series = pd.Series(iris.target) from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS knn = KNeighborsClassifier(n_neighbors=3) efs1 = EFS(knn, min_features=1, max_features=4, scoring='accuracy', print_progress=True, cv=5) efs1 = efs1.fit(X_df, y_series) print('Best accuracy score: %.2f' % efs1.best_score_) print('Best subset (indices):', efs1.best_idx_) print('Best subset (corresponding names):', efs1.best_feature_names_) Features: 15/15 Best accuracy score: 0.97 Best subset (indices): (0, 2, 3) Best subset (corresponding names): ('sepal length', 'petal length', 'petal width') API ExhaustiveFeatureSelector(estimator, min_features=1, max_features=1, print_progress=True, scoring='accuracy', cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Exhaustive Feature Selection for Classification and Regression. (new in v0.4.3) Parameters estimator : scikit-learn classifier or regressor min_features : int (default: 1) Minimum number of features to select max_features : int (default: 1) Maximum number of features to select print_progress : bool (default: True) Prints progress as the number of epochs to stderr. scoring : str, (default='accuracy') Scoring metric in {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} for regressors, or a callable object or function with signature scorer(estimator, X, y) . cv : int (default: 5) Scikit-learn cross-validation generator or int . If estimator is a classifier (or y consists of integer class labels), stratified k-fold is performed, and regular k-fold cross-validation otherwise. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1. Attributes best_idx_ : array-like, shape = [n_predictions] Feature Indices of the selected feature subsets. best_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names.
Otherwise, the feature names are string representation of the feature array indices. New in v 0.13.0. best_score_ : float Cross validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the exhaustive selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representation of the feature array indices. The 'feature_names' is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ExhaustiveFeatureSelector/ Methods fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data and return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns Feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': average of the CV scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.
Returns self transform(X) Return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Feature subset of X, shape={n_samples, k_features}","title":"Exhaustive Feature Selector"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#exhaustive-feature-selector","text":"Implementation of an exhaustive feature selector for sampling and evaluating all possible feature combinations in a specified range. from mlxtend.feature_selection import ExhaustiveFeatureSelector","title":"Exhaustive Feature Selector"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#overview","text":"This exhaustive feature selection algorithm is a wrapper approach for brute-force evaluation of feature subsets; the best subset is selected by optimizing a specified performance metric given an arbitrary regressor or classifier. For instance, if the classifier is a logistic regression and the dataset consists of 4 features, the algorithm will evaluate all 15 feature combinations (if min_features=1 and max_features=4 ) {0} {1} {2} {3} {0, 1} {0, 2} {0, 3} {1, 2} {1, 3} {2, 3} {0, 1, 2} {0, 1, 3} {0, 2, 3} {1, 2, 3} {0, 1, 2, 3} and select the one that results in the best performance (e.g., classification accuracy) of the logistic regression classifier.","title":"Overview"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#example-1-a-simple-iris-example","text":"Initializing a simple classifier from scikit-learn: from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=3) efs1 = EFS(knn, min_features=1, max_features=4, scoring='accuracy', print_progress=True, cv=5) efs1 = efs1.fit(X, y) print('Best accuracy score: %.2f' % efs1.best_score_) print('Best subset (indices):', efs1.best_idx_) print('Best subset (corresponding names):', efs1.best_feature_names_) Features: 15/15 Best accuracy score: 0.97 Best subset (indices): (0, 2, 3) Best subset (corresponding names): ('0', '2', '3') Note that in the example above, the 'best_feature_names_' are simply a string equivalent of the feature indices. However, we can provide custom feature names to the fit function for this mapping: feature_names = ('sepal length', 'sepal width', 'petal length', 'petal width') efs1 = efs1.fit(X, y, custom_feature_names=feature_names) print('Best subset (corresponding names):', efs1.best_feature_names_) Features: 15/15 Best subset (corresponding names): ('sepal length', 'petal length', 'petal width') Via the subsets_ attribute, we can take a look at the selected feature indices at each step: efs1.subsets_ {0: {'avg_score': 0.65999999999999992, 'cv_scores': array([ 0.53333333, 0.63333333, 0.73333333, 0.76666667, 0.63333333]), 'feature_idx': (0,), 'feature_names': ('sepal length',)}, 1: {'avg_score': 0.56666666666666665, 'cv_scores': array([ 0.53333333, 0.63333333, 0.6 , 0.5 , 0.56666667]), 'feature_idx': (1,), 'feature_names': ('sepal width',)}, 2: {'avg_score': 0.95333333333333337, 'cv_scores': array([ 0.93333333, 1. , 0.9 , 0.93333333, 1.
]), 'feature_idx': (2,), 'feature_names': ('petal length',)}, 3: {'avg_score': 0.94666666666666666, 'cv_scores': array([ 0.96666667, 0.96666667, 0.93333333, 0.86666667, 1. ]), 'feature_idx': (3,), 'feature_names': ('petal width',)}, 4: {'avg_score': 0.72666666666666668, 'cv_scores': array([ 0.66666667, 0.8 , 0.63333333, 0.86666667, 0.66666667]), 'feature_idx': (0, 1), 'feature_names': ('sepal length', 'sepal width')}, 5: {'avg_score': 0.94666666666666666, 'cv_scores': array([ 0.96666667, 1. , 0.86666667, 0.93333333, 0.96666667]), 'feature_idx': (0, 2), 'feature_names': ('sepal length', 'petal length')}, 6: {'avg_score': 0.95333333333333337, 'cv_scores': array([ 0.96666667, 0.96666667, 0.9 , 0.93333333, 1. ]), 'feature_idx': (0, 3), 'feature_names': ('sepal length', 'petal width')}, 7: {'avg_score': 0.94666666666666666, 'cv_scores': array([ 0.96666667, 1. , 0.9 , 0.93333333, 0.93333333]), 'feature_idx': (1, 2), 'feature_names': ('sepal width', 'petal length')}, 8: {'avg_score': 0.94000000000000006, 'cv_scores': array([ 0.96666667, 0.96666667, 0.86666667, 0.93333333, 0.96666667]), 'feature_idx': (1, 3), 'feature_names': ('sepal width', 'petal width')}, 9: {'avg_score': 0.95333333333333337, 'cv_scores': array([ 0.96666667, 0.96666667, 0.9 , 0.93333333, 1. ]), 'feature_idx': (2, 3), 'feature_names': ('petal length', 'petal width')}, 10: {'avg_score': 0.94000000000000006, 'cv_scores': array([ 0.96666667, 0.96666667, 0.86666667, 0.93333333, 0.96666667]), 'feature_idx': (0, 1, 2), 'feature_names': ('sepal length', 'sepal width', 'petal length')}, 11: {'avg_score': 0.94666666666666666, 'cv_scores': array([ 0.93333333, 0.96666667, 0.9 , 0.93333333, 1. ]), 'feature_idx': (0, 1, 3), 'feature_names': ('sepal length', 'sepal width', 'petal width')}, 12: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.96666667, 0.96666667, 0.96666667, 0.96666667, 1. ]), 'feature_idx': (0, 2, 3), 'feature_names': ('sepal length', 'petal length', 'petal width')}, 13: {'avg_score': 0.95999999999999996, 'cv_scores': array([ 0.96666667, 0.96666667, 0.93333333, 0.93333333, 1. ]), 'feature_idx': (1, 2, 3), 'feature_names': ('sepal width', 'petal length', 'petal width')}, 14: {'avg_score': 0.96666666666666679, 'cv_scores': array([ 0.96666667, 0.96666667, 0.93333333, 0.96666667, 1. ]), 'feature_idx': (0, 1, 2, 3), 'feature_names': ('sepal length', 'sepal width', 'petal length', 'petal width')}}","title":"Example 1 - A simple Iris Example"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#example-2-visualizing-the-feature-selection-results","text":"For our convenience, we can visualize the output from the feature selection in a pandas DataFrame format using the get_metric_dict method of the ExhaustiveFeatureSelector object. The columns std_dev and std_err represent the standard deviation and standard errors of the cross-validation scores, respectively. 
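As a hedged aside (an assumption checked against the table below, not a statement of the library's internals), the reported numbers are consistent with std_dev being the population standard deviation of the CV scores and std_err = std_dev / sqrt(n_folds - 1):

import numpy as np

# CV scores of the best subset (0, 2, 3) from the table below
cv_scores = np.array([0.96666667, 0.96666667, 0.96666667, 0.96666667, 1.0])
std_dev = cv_scores.std()                        # population standard deviation
std_err = std_dev / np.sqrt(len(cv_scores) - 1)  # assumed relation, see above
print('%.7f %.7f' % (std_dev, std_err))          # ~0.0133333 0.0066667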
Below, we see the DataFrame of the Exhaustive Feature Selector: import pandas as pd iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=3) efs1 = EFS(knn, min_features=1, max_features=4, scoring='accuracy', print_progress=True, cv=5) feature_names = ('sepal length', 'sepal width', 'petal length', 'petal width') efs1 = efs1.fit(X, y, custom_feature_names=feature_names) df = pd.DataFrame.from_dict(efs1.get_metric_dict()).T df.sort_values('avg_score', inplace=True, ascending=False) df Features: 15/15 avg_score ci_bound cv_scores feature_idx feature_names std_dev std_err 12 0.973333 0.0171372 [0.966666666667, 0.966666666667, 0.96666666666... (0, 2, 3) (sepal length, petal length, petal width) 0.0133333 0.00666667 14 0.966667 0.0270963 [0.966666666667, 0.966666666667, 0.93333333333... (0, 1, 2, 3) (sepal length, sepal width, petal length, peta... 0.0210819 0.0105409 13 0.96 0.0320608 [0.966666666667, 0.966666666667, 0.93333333333... (1, 2, 3) (sepal width, petal length, petal width) 0.0249444 0.0124722 2 0.953333 0.0514116 [0.933333333333, 1.0, 0.9, 0.933333333333, 1.0] (2,) (petal length,) 0.04 0.02 6 0.953333 0.0436915 [0.966666666667, 0.966666666667, 0.9, 0.933333... (0, 3) (sepal length, petal width) 0.0339935 0.0169967 9 0.953333 0.0436915 [0.966666666667, 0.966666666667, 0.9, 0.933333... (2, 3) (petal length, petal width) 0.0339935 0.0169967 3 0.946667 0.0581151 [0.966666666667, 0.966666666667, 0.93333333333... (3,) (petal width,) 0.0452155 0.0226078 5 0.946667 0.0581151 [0.966666666667, 1.0, 0.866666666667, 0.933333... (0, 2) (sepal length, petal length) 0.0452155 0.0226078 7 0.946667 0.0436915 [0.966666666667, 1.0, 0.9, 0.933333333333, 0.9... (1, 2) (sepal width, petal length) 0.0339935 0.0169967 11 0.946667 0.0436915 [0.933333333333, 0.966666666667, 0.9, 0.933333... (0, 1, 3) (sepal length, sepal width, petal width) 0.0339935 0.0169967 8 0.94 0.0499631 [0.966666666667, 0.966666666667, 0.86666666666... (1, 3) (sepal width, petal width) 0.038873 0.0194365 10 0.94 0.0499631 [0.966666666667, 0.966666666667, 0.86666666666... (0, 1, 2) (sepal length, sepal width, petal length) 0.038873 0.0194365 4 0.726667 0.11623 [0.666666666667, 0.8, 0.633333333333, 0.866666... (0, 1) (sepal length, sepal width) 0.0904311 0.0452155 0 0.66 0.106334 [0.533333333333, 0.633333333333, 0.73333333333... (0,) (sepal length,) 0.0827312 0.0413656 1 0.566667 0.0605892 [0.533333333333, 0.633333333333, 0.6, 0.5, 0.5...
(1,) (sepal width,) 0.0471405 0.0235702 import matplotlib.pyplot as plt metric_dict = efs1.get_metric_dict() fig = plt.figure() k_feat = sorted(metric_dict.keys()) avg = [metric_dict[k]['avg_score'] for k in k_feat] upper, lower = [], [] for k in k_feat: upper.append(metric_dict[k]['avg_score'] + metric_dict[k]['std_dev']) lower.append(metric_dict[k]['avg_score'] - metric_dict[k]['std_dev']) plt.fill_between(k_feat, upper, lower, alpha=0.2, color='blue', lw=1) plt.plot(k_feat, avg, color='blue', marker='o') plt.ylabel('Accuracy +/- Standard Deviation') plt.xlabel('Number of Features') feature_min = len(metric_dict[k_feat[0]]['feature_idx']) feature_max = len(metric_dict[k_feat[-1]]['feature_idx']) plt.xticks(k_feat, [str(metric_dict[k]['feature_names']) for k in k_feat], rotation=90) plt.show()","title":"Example 2 - Visualizing the feature selection results"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#example-3-exhaustive-feature-selection-for-regression","text":"Similar to the classification examples above, the ExhaustiveFeatureSelector also supports scikit-learn's estimators for regression. from sklearn.linear_model import LinearRegression from sklearn.datasets import load_boston boston = load_boston() X, y = boston.data, boston.target lr = LinearRegression() efs = EFS(lr, min_features=10, max_features=12, scoring='neg_mean_squared_error', cv=10) efs.fit(X, y) print('Best MSE score: %.2f' % (efs.best_score_ * (-1))) print('Best subset:', efs.best_idx_) Features: 377/377 Best subset: (0, 1, 4, 6, 7, 8, 9, 10, 11, 12)","title":"Example 3 - Exhaustive Feature Selection for Regression"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#example-4-using-the-selected-feature-subset-for-making-new-predictions","text":"# Initialize the dataset from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=1) knn = KNeighborsClassifier(n_neighbors=3) # Select the \"best\" feature subset via # 5-fold cross-validation on the training set.
from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS efs1 = EFS(knn, min_features=1, max_features=4, scoring='accuracy', cv=5) efs1 = efs1.fit(X_train, y_train) Features: 15/15 print('Selected features:', efs1.best_idx_) Selected features: (2, 3) # Generate the new subsets based on the selected features # Note that the transform call is equivalent to # X_train[:, efs1.best_idx_] X_train_efs = efs1.transform(X_train) X_test_efs = efs1.transform(X_test) # Fit the estimator using the new feature subset # and make a prediction on the test data knn.fit(X_train_efs, y_train) y_pred = knn.predict(X_test_efs) # Compute the accuracy of the prediction acc = float((y_test == y_pred).sum()) / y_pred.shape[0] print('Test set accuracy: %.2f %%' % (acc*100)) Test set accuracy: 96.00 %","title":"Example 4 - Using the Selected Feature Subset For Making New Predictions"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#example-5-exhaustive-feature-selection-and-gridsearch","text":"# Initialize the dataset from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=1) Use scikit-learn's GridSearch to tune the hyperparameters of the LogisticRegression estimator inside the ExhaustiveFeatureSelector and use it for prediction in the pipeline. Note that the clone_estimator attribute needs to be set to False . from sklearn.model_selection import GridSearchCV from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS lr = LogisticRegression(multi_class='multinomial', solver='lbfgs', random_state=123) efs1 = EFS(estimator=lr, min_features=2, max_features=3, scoring='accuracy', print_progress=False, clone_estimator=False, cv=5, n_jobs=1) pipe = make_pipeline(efs1, lr) param_grid = {'exhaustivefeatureselector__estimator__C': [0.1, 1.0, 10.0]} gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', n_jobs=1, cv=2, verbose=1, refit=False) # run gridsearch gs = gs.fit(X_train, y_train) Fitting 2 folds for each of 3 candidates, totalling 6 fits [Parallel(n_jobs=1)]: Done 6 out of 6 | elapsed: 2.7s finished ... and the \"best\" parameters determined by GridSearch are ... print(\"Best parameters via GridSearch\", gs.best_params_) Best parameters via GridSearch {'exhaustivefeatureselector__estimator__C': 1.0}","title":"Example 5 - Exhaustive Feature Selection and GridSearch"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#obtaining-the-best-k-feature-indices-after-gridsearch","text":"If we are interested in the best k feature indices via ExhaustiveFeatureSelector.best_idx_ , we have to initialize a GridSearchCV object with refit=True . Now, the grid search object will take the complete training dataset and the best parameters, which it found via cross-validation, to train the estimator pipeline. gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', n_jobs=1, cv=2, verbose=1, refit=True) After running the grid search, we can access the individual pipeline objects of the best_estimator_ via the steps attribute.
gs = gs.fit(X_train, y_train) gs.best_estimator_.steps Fitting 2 folds for each of 3 candidates, totalling 6 fits [Parallel(n_jobs=1)]: Done 6 out of 6 | elapsed: 2.9s finished [('exhaustivefeatureselector', ExhaustiveFeatureSelector(clone_estimator=False, cv=5, estimator=LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='multinomial', n_jobs=1, penalty='l2', random_state=123, solver='lbfgs', tol=0.0001, verbose=0, warm_start=False), max_features=3, min_features=2, n_jobs=1, pre_dispatch='2*n_jobs', print_progress=False, scoring='accuracy')), ('logisticregression', LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='multinomial', n_jobs=1, penalty='l2', random_state=123, solver='lbfgs', tol=0.0001, verbose=0, warm_start=False))] Via sub-indexing, we can then obtain the best-selected feature subset: print('Best features:', gs.best_estimator_.steps[0][1].best_idx_) Best features: (2, 3) During cross-validation, this feature combination had a CV accuracy of: print('Best score:', gs.best_score_) Best score: 0.97 gs.best_params_ {'exhaustivefeatureselector__estimator__C': 1.0} Alternatively , we can set the \"best grid search parameters\" in our pipeline manually if we ran GridSearchCV with refit=False . It should yield the same results: pipe.set_params(**gs.best_params_).fit(X_train, y_train) print('Best features:', pipe.steps[0][1].best_idx_) Best features: (2, 3)","title":"Obtaining the best k feature indices after GridSearch"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#example-6-working-with-pandas-dataframes","text":"Optionally, we can also use pandas DataFrames and pandas Series as input to the fit function. In this case, the column names of the pandas DataFrame will be used as feature names. However, note that if custom_feature_names are provided in the fit function, these custom_feature_names take precedence over the DataFrame column-based feature names. import pandas as pd from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris iris = load_iris() col_names = ('sepal length', 'sepal width', 'petal length', 'petal width') X_df = pd.DataFrame(iris.data, columns=col_names) y_series = pd.Series(iris.target) from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS knn = KNeighborsClassifier(n_neighbors=3) efs1 = EFS(knn, min_features=1, max_features=4, scoring='accuracy', print_progress=True, cv=5) efs1 = efs1.fit(X_df, y_series) print('Best accuracy score: %.2f' % efs1.best_score_) print('Best subset (indices):', efs1.best_idx_) print('Best subset (corresponding names):', efs1.best_feature_names_) Features: 15/15 Best accuracy score: 0.97 Best subset (indices): (0, 2, 3) Best subset (corresponding names): ('sepal length', 'petal length', 'petal width')","title":"Example 6 - Working with pandas DataFrames"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#api","text":"ExhaustiveFeatureSelector(estimator, min_features=1, max_features=1, print_progress=True, scoring='accuracy', cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Exhaustive Feature Selection for Classification and Regression.
(new in v0.4.3) Parameters estimator : scikit-learn classifier or regressor min_features : int (default: 1) Minimum number of features to select max_features : int (default: 1) Maximum number of features to select print_progress : bool (default: True) Prints progress as the number of evaluated feature subsets to stderr. scoring : str, (default='accuracy') Scoring metric in {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} for regressors, or a callable object or function with signature scorer(estimator, X, y) . cv : int (default: 5) Scikit-learn cross-validation generator or int . If estimator is a classifier (or y consists of integer class labels), stratified k-fold is performed, and regular k-fold cross-validation otherwise. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1. Attributes best_idx_ : array-like, shape = [n_predictions] Feature Indices of the selected feature subsets. best_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. New in v 0.13.0. best_score_ : float Cross-validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the exhaustive selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. The 'feature_names' entry is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/ExhaustiveFeatureSelector/","title":"API"},{"location":"user_guide/feature_selection/ExhaustiveFeatureSelector/#methods","text":"fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data.
Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. custom_feature_names : None or tuple (default: tuple) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data and return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns Feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': average of the CV scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Return the best selected features from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Feature subset of X, shape={n_samples, k_features}","title":"Methods"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/","text":"Sequential Feature Selector Implementation of sequential feature algorithms (SFAs) -- greedy search algorithms -- that have been developed as a suboptimal solution to the computationally often not feasible exhaustive search. from mlxtend.feature_selection import SequentialFeatureSelector Overview Sequential feature selection algorithms are a family of greedy search algorithms that are used to reduce an initial d -dimensional feature space to a k -dimensional feature subspace where k < d . The motivation behind feature selection algorithms is to automatically select a subset of features that is most relevant to the problem.
The goal of feature selection is two-fold: We want to improve the computational efficiency and reduce the generalization error of the model by removing irrelevant features or noise. A wrapper approach such as sequential feature selection is especially useful if embedded feature selection -- for example, a regularization penalty like LASSO -- is not applicable. In a nutshell, SFAs remove or add one feature at a time based on the classifier performance until a feature subset of the desired size k is reached. There are 4 different flavors of SFAs available via the SequentialFeatureSelector : Sequential Forward Selection (SFS) Sequential Backward Selection (SBS) Sequential Forward Floating Selection (SFFS) Sequential Backward Floating Selection (SBFS) The floating variants, SFFS and SBFS, can be considered as extensions to the simpler SFS and SBS algorithms. The floating algorithms have an additional exclusion or inclusion step to remove features once they were included (or excluded), so that a larger number of feature subset combinations can be sampled. It is important to emphasize that this step is conditional and only occurs if the resulting feature subset is assessed as \"better\" by the criterion function after removal (or addition) of a particular feature. Furthermore, I added an optional check to skip the conditional exclusion steps if the algorithm gets stuck in cycles. How is this different from Recursive Feature Elimination (RFE) -- e.g., as implemented in sklearn.feature_selection.RFE ? RFE is computationally less complex: it uses the feature weight coefficients (e.g., of linear models) or feature importances (of tree-based algorithms) to eliminate features recursively, whereas SFAs eliminate (or add) features based on a user-defined classifier/regression performance metric. The SFAs are outlined in pseudo code below: Sequential Forward Selection (SFS) Input: Y = \{y_1, y_2, ..., y_d\} The SFS algorithm takes the whole d -dimensional feature set as input. Output: X_k = \{x_j \; | \;j = 1, 2, ..., k; \; x_j \in Y\} , where k = (0, 1, 2, ..., d) SFS returns a subset of features; the number of selected features k , where k < d , has to be specified a priori . Initialization: X_0 = \emptyset , k = 0 We initialize the algorithm with an empty set \emptyset (\"null set\") so that k = 0 (where k is the size of the subset). Step 1 (Inclusion): x^+ = \text{ arg max } J(X_k + x), \text{ where } x \in Y - X_k X_{k+1} = X_k + x^+ k = k + 1 Go to Step 1 In this step, we add an additional feature, x^+ , to our feature subset X_k . x^+ is the feature that maximizes our criterion function, that is, the feature that is associated with the best classifier performance if it is added to X_k . We repeat this procedure until the termination criterion is satisfied. Termination: k = p We add features from the feature subset X_k until the feature subset of size k contains the number of desired features p that we specified a priori . Sequential Backward Selection (SBS) Input: the set of all features, Y = \{y_1, y_2, ..., y_d\} The SBS algorithm takes the whole feature set as input. Output: X_k = \{x_j \; | \;j = 1, 2, ..., k; \; x_j \in Y\} , where k = (0, 1, 2, ..., d) SBS returns a subset of features; the number of selected features k , where k < d , has to be specified a priori . Initialization: X_0 = Y , k = d We initialize the algorithm with the given feature set so that k = d .
Step 1 (Exclusion): x^- = \text{ arg max } J(X_k - x), \text{ where } x \in X_k X_{k-1} = X_k - x^- k = k - 1 Go to Step 1 In this step, we remove a feature, x^- , from our feature subset X_k . x^- is the feature that maximizes our criterion function upon removal, that is, the feature that is associated with the best classifier performance if it is removed from X_k . We repeat this procedure until the termination criterion is satisfied. Termination: k = p We remove features from the feature subset X_k until the feature subset of size k contains the number of desired features p that we specified a priori . Sequential Backward Floating Selection (SBFS) Input: the set of all features, Y = \{y_1, y_2, ..., y_d\} The SBFS algorithm takes the whole feature set as input. Output: X_k = \{x_j \; | \;j = 1, 2, ..., k; \; x_j \in Y\} , where k = (0, 1, 2, ..., d) SBFS returns a subset of features; the number of selected features k , where k < d , has to be specified a priori . Initialization: X_0 = Y , k = d We initialize the algorithm with the given feature set so that k = d . Step 1 (Exclusion): x^- = \text{ arg max } J(X_k - x), \text{ where } x \in X_k X_{k-1} = X_k - x^- k = k - 1 Go to Step 2 In this step, we remove a feature, x^- , from our feature subset X_k . x^- is the feature that maximizes our criterion function upon removal, that is, the feature that is associated with the best classifier performance if it is removed from X_k . Step 2 (Conditional Inclusion): x^+ = \text{ arg max } J(X_k + x), \text{ where } x \in Y - X_k if J(X_k + x^+) > J(X_k) : X_{k+1} = X_k + x^+ k = k + 1 Go to Step 1 In Step 2, we search for features that improve the classifier performance if they are added back to the feature subset. If such features exist, we add the feature x^+ for which the performance improvement is maximized. If k = 2 or an improvement cannot be made (i.e., such feature x^+ cannot be found), go back to step 1; else, repeat this step. Termination: k = p We remove features from the feature subset X_k until the feature subset of size k contains the number of desired features p that we specified a priori . Sequential Forward Floating Selection (SFFS) Input: the set of all features, Y = \{y_1, y_2, ..., y_d\} The SFFS algorithm takes the whole feature set as input; for example, a feature space that consists of 10 dimensions ( d = 10 ). Output: a subset of features, X_k = \{x_j \; | \;j = 1, 2, ..., k; \; x_j \in Y\} , where k = (0, 1, 2, ..., d) The returned output of the algorithm is a subset of the feature space of a specified size. E.g., a subset of 5 features from a 10-dimensional feature space ( k = 5, d = 10 ). Initialization: X_0 = \emptyset , k = 0 We initialize the algorithm with an empty set (\"null set\") so that k = 0 (where k is the size of the subset) Step 1 (Inclusion): x^+ = \text{ arg max } J(X_k + x), \text{ where } x \in Y - X_k X_{k+1} = X_k + x^+ k = k + 1 Go to Step 2 Step 2 (Conditional Exclusion): x^- = \text{ arg max } J(X_k - x), \text{ where } x \in X_k if \; J(X_k - x^-) > J(X_k) : X_{k-1} = X_k - x^- k = k - 1 Go to Step 1 In step 1, we include the feature from the feature space that leads to the best performance increase for our feature subset (assessed by the criterion function ). Then, we go over to step 2. In step 2, we only remove a feature if the resulting subset would gain an increase in performance.
If k = 2 or an improvement cannot be made (i.e., such feature x^- cannot be found), go back to step 1; else, repeat this step. Steps 1 and 2 are repeated until the Termination criterion is reached. Termination: stop when k equals the number of desired features References Ferri, F. J., Pudil P., Hatef, M., Kittler, J. (1994). \"Comparative study of techniques for large-scale feature selection.\" Pattern Recognition in Practice IV : 403-413. Pudil, P., Novovi\u010dov\u00e1, J., & Kittler, J. (1994). \"Floating search methods in feature selection.\" Pattern Recognition Letters 15.11: 1119-1125. Example 1 - A simple Sequential Forward Selection example Initializing a simple classifier from scikit-learn: from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=4) We start by selecting the \"best\" 3 features from the Iris dataset via Sequential Forward Selection (SFS). Here, we set forward=True and floating=False . By choosing cv=0 , we don't perform any cross-validation; therefore, the performance (here: 'accuracy' ) is computed entirely on the training set. from mlxtend.feature_selection import SequentialFeatureSelector as SFS sfs1 = SFS(knn, k_features=3, forward=True, floating=False, verbose=2, scoring='accuracy', cv=0) sfs1 = sfs1.fit(X, y) [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 1/3 -- score: 0.96[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 2/3 -- score: 0.973333333333[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 3/3 -- score: 0.973333333333 Via the subsets_ attribute, we can take a look at the selected feature indices at each step: sfs1.subsets_ {1: {'avg_score': 0.95999999999999996, 'cv_scores': array([ 0.96]), 'feature_idx': (3,), 'feature_names': ('3',)}, 2: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (2, 3), 'feature_names': ('2', '3')}, 3: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (1, 2, 3), 'feature_names': ('1', '2', '3')}} Note that the 'feature_names' entry is simply a string representation of the 'feature_idx' in this case.
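To make the sequential forward selection procedure from the pseudo code above more concrete, the greedy loop can be sketched in a few lines of plain Python. This is an illustrative sketch only, not mlxtend's actual implementation: the helper name greedy_forward_selection is made up for this example, and scikit-learn's cross_val_score stands in for the criterion function J.

from sklearn.model_selection import cross_val_score

def greedy_forward_selection(estimator, X, y, k, cv=5):
    # Initialization: X_0 is the empty ("null") set
    selected, remaining = [], list(range(X.shape[1]))
    while len(selected) < k:
        # Step 1 (Inclusion): evaluate J(X_k + x) for every remaining
        # feature x and keep the feature x^+ that maximizes the criterion
        score, best = max(
            (cross_val_score(estimator, X[:, selected + [i]], y, cv=cv).mean(), i)
            for i in remaining)
        selected.append(best)
        remaining.remove(best)
    # Termination: the subset has reached the desired size k = p
    return tuple(selected), score

On the Iris data above, greedy_forward_selection(knn, X, y, k=3) should produce a subset and score comparable to sfs1.k_feature_idx_ and sfs1.k_score_ , up to differences in the cross-validation scheme (Example 1 used cv=0 , i.e., no cross-validation).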
Optionally, we can provide custom feature names via the fit method's custom_feature_names parameter: feature_names = ('sepal length', 'sepal width', 'petal length', 'petal width') sfs1 = sfs1.fit(X, y, custom_feature_names=feature_names) sfs1.subsets_ [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 1/3 -- score: 0.96[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 2/3 -- score: 0.973333333333[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 3/3 -- score: 0.973333333333 {1: {'avg_score': 0.95999999999999996, 'cv_scores': array([ 0.96]), 'feature_idx': (3,), 'feature_names': ('petal width',)}, 2: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (2, 3), 'feature_names': ('petal length', 'petal width')}, 3: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (1, 2, 3), 'feature_names': ('sepal width', 'petal length', 'petal width')}} Furthermore, we can access the indices of the 3 best features directly via the k_feature_idx_ attribute: sfs1.k_feature_idx_ (1, 2, 3) And similarly, to obtain the names of these features, given that we provided an argument to the custom_feature_names parameter, we can refer to the sfs1.k_feature_names_ attribute: sfs1.k_feature_names_ ('sepal width', 'petal length', 'petal width') Finally, the prediction score for these 3 features can be accessed via k_score_ : sfs1.k_score_ 0.97333333333333338 Example 2 - Toggling between SFS, SBS, SFFS, and SBFS Using the forward and floating parameters, we can toggle between SFS, SBS, SFFS, and SBFS as shown below. Note that we are performing (stratified) 4-fold cross-validation for more robust estimates in contrast to Example 1. Via n_jobs=-1 , we choose to run the cross-validation on all our available CPU cores.
# Sequential Forward Selection sfs = SFS(knn, k_features=3, forward=True, floating=False, scoring='accuracy', cv=4, n_jobs=-1) sfs = sfs.fit(X, y) print('\nSequential Forward Selection (k=3):') print(sfs.k_feature_idx_) print('CV Score:') print(sfs.k_score_) ################################################### # Sequential Backward Selection sbs = SFS(knn, k_features=3, forward=False, floating=False, scoring='accuracy', cv=4, n_jobs=-1) sbs = sbs.fit(X, y) print('\nSequential Backward Selection (k=3):') print(sbs.k_feature_idx_) print('CV Score:') print(sbs.k_score_) ################################################### # Sequential Forward Floating Selection sffs = SFS(knn, k_features=3, forward=True, floating=True, scoring='accuracy', cv=4, n_jobs=-1) sffs = sffs.fit(X, y) print('\nSequential Forward Floating Selection (k=3):') print(sffs.k_feature_idx_) print('CV Score:') print(sffs.k_score_) ################################################### # Sequential Backward Floating Selection sbfs = SFS(knn, k_features=3, forward=False, floating=True, scoring='accuracy', cv=4, n_jobs=-1) sbfs = sbfs.fit(X, y) print('\nSequential Backward Floating Selection (k=3):') print(sbfs.k_feature_idx_) print('CV Score:') print(sbfs.k_score_) Sequential Forward Selection (k=3): (1, 2, 3) CV Score: 0.972756410256 Sequential Backward Selection (k=3): (1, 2, 3) CV Score: 0.972756410256 Sequential Forward Floating Selection (k=3): (1, 2, 3) CV Score: 0.972756410256 Sequential Backward Floating Selection (k=3): (1, 2, 3) CV Score: 0.972756410256 In this simple scenario, selecting the best 3 features out of the 4 available features in the Iris set, we end up with similar results regardless of which sequential selection algorithms we used. Example 3 - Visualizing the results in DataFrames For our convenience, we can visualize the output from the feature selection in a pandas DataFrame format using the get_metric_dict method of the SequentialFeatureSelector object. The columns std_dev and std_err represent the standard deviation and standard errors of the cross-validation scores, respectively. Below, we see the DataFrame of the Sequential Forward Selector from Example 2: import pandas as pd pd.DataFrame.from_dict(sfs.get_metric_dict()).T avg_score ci_bound cv_scores feature_idx feature_names std_dev std_err 1 0.952991 0.0660624 [0.974358974359, 0.948717948718, 0.88888888888... (3,) (3,) 0.0412122 0.0237939 2 0.959936 0.0494801 [0.974358974359, 0.948717948718, 0.91666666666... (2, 3) (2, 3) 0.0308676 0.0178214 3 0.972756 0.0315204 [0.974358974359, 1.0, 0.944444444444, 0.972222... (1, 2, 3) (1, 2, 3) 0.0196636 0.0113528 Now, let's compare it to the Sequential Backward Selector: pd.DataFrame.from_dict(sbs.get_metric_dict()).T avg_score ci_bound cv_scores feature_idx feature_names std_dev std_err 3 0.972756 0.0315204 [0.974358974359, 1.0, 0.944444444444, 0.972222... (1, 2, 3) (1, 2, 3) 0.0196636 0.0113528 4 0.952991 0.0372857 [0.974358974359, 0.948717948718, 0.91666666666... (0, 1, 2, 3) (0, 1, 2, 3) 0.0232602 0.0134293 We can see that both SFS and SBS found the same \"best\" 3 features; however, the intermediate steps were obviously different.
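As a plausibility check of the std_dev and std_err columns, both values can be recomputed directly from a row's cv_scores entry. The snippet below is illustrative only; the exact conventions (population standard deviation, and division by sqrt(n - 1) for the standard error) are inferred from the numbers in the table above rather than taken from the mlxtend source.

import numpy as np

# cv_scores of the 3-feature subset from the SFS table above
scores = np.array([0.974358974359, 1.0, 0.944444444444, 0.972222222222])
std_dev = scores.std()                        # ddof=0 -> ~0.0196636
std_err = std_dev / np.sqrt(len(scores) - 1)  # -> ~0.0113528
print(std_dev, std_err)  # matches the std_dev and std_err columns above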
The ci_bound column in the DataFrames above represents the confidence interval around the computed cross-validation scores. By default, a confidence interval of 95% is used, but we can use different confidence bounds via the confidence_interval parameter. E.g., the confidence bounds for a 90% confidence interval can be obtained as follows: pd.DataFrame.from_dict(sbs.get_metric_dict(confidence_interval=0.90)).T avg_score ci_bound cv_scores feature_idx feature_names std_dev std_err 3 0.972756 0.0242024 [0.974358974359, 1.0, 0.944444444444, 0.972222... (1, 2, 3) (1, 2, 3) 0.0196636 0.0113528 4 0.952991 0.0286292 [0.974358974359, 0.948717948718, 0.91666666666... (0, 1, 2, 3) (0, 1, 2, 3) 0.0232602 0.0134293 Example 4 - Plotting the results After importing the little helper function plotting.plot_sequential_feature_selection , we can also visualize the results using matplotlib figures. from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs import matplotlib.pyplot as plt sfs = SFS(knn, k_features=4, forward=True, floating=False, scoring='accuracy', verbose=2, cv=5) sfs = sfs.fit(X, y) fig1 = plot_sfs(sfs.get_metric_dict(), kind='std_dev') plt.ylim([0.8, 1]) plt.title('Sequential Forward Selection (w. StdDev)') plt.grid() plt.show() [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished [2018-05-06 12:49:18] Features: 1/4 -- score: 0.96[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished [2018-05-06 12:49:18] Features: 2/4 -- score: 0.966666666667[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.0s finished [2018-05-06 12:49:18] Features: 3/4 -- score: 0.953333333333[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s finished [2018-05-06 12:49:18] Features: 4/4 -- score: 0.973333333333 Example 5 - Sequential Feature Selection for Regression Similar to the classification examples above, the SequentialFeatureSelector also supports scikit-learn's estimators for regression. from sklearn.linear_model import LinearRegression from sklearn.datasets import load_boston boston = load_boston() X, y = boston.data, boston.target lr = LinearRegression() sfs = SFS(lr, k_features=13, forward=True, floating=False, scoring='neg_mean_squared_error', cv=10) sfs = sfs.fit(X, y) fig = plot_sfs(sfs.get_metric_dict(), kind='std_err') plt.title('Sequential Forward Selection (w. StdErr)') plt.grid() plt.show() Example 6 -- Feature Selection with Fixed Train/Validation Splits If you do not wish to use cross-validation (here: k-fold cross-validation, i.e., rotating training and validation folds), you can use the PredefinedHoldoutSplit class to specify your own, fixed training and validation split.
from sklearn.datasets import load_iris from mlxtend.evaluate import PredefinedHoldoutSplit import numpy as np iris = load_iris() X = iris.data y = iris.target rng = np.random.RandomState(123) my_validation_indices = rng.permutation(np.arange(150))[:30] print(my_validation_indices) [ 72 112 132 88 37 138 87 42 8 90 141 33 59 116 135 104 36 13 63 45 28 133 24 127 46 20 31 121 117 4] from sklearn.neighbors import KNeighborsClassifier from mlxtend.feature_selection import SequentialFeatureSelector as SFS knn = KNeighborsClassifier(n_neighbors=4) piter = PredefinedHoldoutSplit(my_validation_indices) sfs1 = SFS(knn, k_features=3, forward=True, floating=False, verbose=2, scoring='accuracy', cv=piter) sfs1 = sfs1.fit(X, y) [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished [2018-09-24 02:31:21] Features: 1/3 -- score: 0.9666666666666667[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished [2018-09-24 02:31:21] Features: 2/3 -- score: 0.9666666666666667[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.0s finished [2018-09-24 02:31:21] Features: 3/3 -- score: 0.9666666666666667 Example 7 -- Using the Selected Feature Subset For Making New Predictions # Initialize the dataset from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=1) knn = KNeighborsClassifier(n_neighbors=4) # Select the \"best\" three features via # 5-fold cross-validation on the training set. 
from mlxtend.feature_selection import SequentialFeatureSelector as SFS sfs1 = SFS(knn, k_features=3, forward=True, floating=False, scoring='accuracy', cv=5) sfs1 = sfs1.fit(X_train, y_train) print('Selected features:', sfs1.k_feature_idx_) Selected features: (1, 2, 3) # Generate the new subsets based on the selected features # Note that the transform call is equivalent to # X_train[:, sfs1.k_feature_idx_] X_train_sfs = sfs1.transform(X_train) X_test_sfs = sfs1.transform(X_test) # Fit the estimator using the new feature subset # and make a prediction on the test data knn.fit(X_train_sfs, y_train) y_pred = knn.predict(X_test_sfs) # Compute the accuracy of the prediction acc = float((y_test == y_pred).sum()) / y_pred.shape[0] print('Test set accuracy: %.2f %%' % (acc * 100)) Test set accuracy: 96.00 % Example 8 -- Sequential Feature Selection and GridSearch # Initialize the dataset from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=1) Use scikit-learn's GridSearch to tune the hyperparameters inside and outside the SequentialFeatureSelector : from sklearn.model_selection import GridSearchCV from sklearn.pipeline import Pipeline from mlxtend.feature_selection import SequentialFeatureSelector as SFS import mlxtend knn = KNeighborsClassifier(n_neighbors=2) sfs1 = SFS(estimator=knn, k_features=3, forward=True, floating=False, scoring='accuracy', cv=5) pipe = Pipeline([('sfs', sfs1), ('knn', knn)]) param_grid = [ {'sfs__k_features': [1, 2, 3, 4], 'sfs__estimator__n_neighbors': [1, 2, 3, 4]} ] gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', n_jobs=1, cv=5, refit=False) # run gridsearch gs = gs.fit(X_train, y_train) ... and the \"best\" parameters determined by GridSearch are ... print(\"Best parameters via GridSearch\", gs.best_params_) Best parameters via GridSearch {'sfs__estimator__n_neighbors': 1, 'sfs__k_features': 3} Obtaining the best k feature indices after GridSearch If we are interested in the best k feature indices via SequentialFeatureSelector.k_feature_idx_ , we have to initialize a GridSearchCV object with refit=True . Now, the grid search object will take the complete training dataset and the best parameters, which it found via cross-validation, to train the estimator pipeline. gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', n_jobs=1, cv=5, refit=True) gs = gs.fit(X_train, y_train) After running the grid search, we can access the individual pipeline objects of the best_estimator_ via the steps attribute.
gs.best_estimator_.steps [('sfs', SequentialFeatureSelector(clone_estimator=True, cv=5, estimator=KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=1, n_neighbors=1, p=2, weights='uniform'), floating=False, forward=True, k_features=3, n_jobs=1, pre_dispatch='2*n_jobs', scoring='accuracy', verbose=0)), ('knn', KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=1, n_neighbors=2, p=2, weights='uniform'))] Via sub-indexing, we can then obtain the best-selected feature subset: print('Best features:', gs.best_estimator_.steps[0][1].k_feature_idx_) Best features: (0, 1, 3) During cross-validation, this feature combination had a CV accuracy of: print('Best score:', gs.best_score_) Best score: 0.94 gs.best_params_ {'sfs__estimator__n_neighbors': 1, 'sfs__k_features': 3} Alternatively , we can set the \"best grid search parameters\" in our pipeline manually if we ran GridSearchCV with refit=False . It should yield the same results: pipe.set_params(**gs.best_params_).fit(X_train, y_train) print('Best features:', pipe.steps[0][1].k_feature_idx_) Best features: (0, 1, 3) Example 9 -- Selecting the \"best\" feature combination in a k-range If k_features is set to a tuple (min_k, max_k) (new in 0.4.2), the SFS will now select the best feature combination that it discovered by iterating from k=1 to max_k (forward), or max_k to min_k (backward). The size of the returned feature subset is then between min_k and max_k , depending on which combination scored best during cross-validation. X.shape (150, 4) from mlxtend.feature_selection import SequentialFeatureSelector as SFS from sklearn.neighbors import KNeighborsClassifier from mlxtend.data import wine_data from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.pipeline import make_pipeline X, y = wine_data() X_train, X_test, y_train, y_test= train_test_split(X, y, stratify=y, test_size=0.3, random_state=1) knn = KNeighborsClassifier(n_neighbors=2) sfs1 = SFS(estimator=knn, k_features=(3, 10), forward=True, floating=False, scoring='accuracy', cv=5) pipe = make_pipeline(StandardScaler(), sfs1) pipe.fit(X_train, y_train) print('best combination (ACC: %.3f): %s\n' % (sfs1.k_score_, sfs1.k_feature_idx_)) print('all subsets:\n', sfs1.subsets_) plot_sfs(sfs1.get_metric_dict(), kind='std_err'); best combination (ACC: 0.992): (0, 1, 2, 3, 6, 8, 9, 10, 11, 12) all subsets: {1: {'feature_idx': (6,), 'cv_scores': array([ 0.84615385, 0.6 , 0.88 , 0.79166667, 0.875 ]), 'avg_score': 0.7985641025641026, 'feature_names': ('6',)}, 2: {'feature_idx': (6, 9), 'cv_scores': array([ 0.92307692, 0.88 , 1. , 0.95833333, 0.91666667]), 'avg_score': 0.93561538461538463, 'feature_names': ('6', '9')}, 3: {'feature_idx': (6, 9, 12), 'cv_scores': array([ 0.92307692, 0.92 , 0.96 , 1. , 0.95833333]), 'avg_score': 0.95228205128205123, 'feature_names': ('6', '9', '12')}, 4: {'feature_idx': (3, 6, 9, 12), 'cv_scores': array([ 0.96153846, 0.96 , 0.96 , 1. , 0.95833333]), 'avg_score': 0.96797435897435891, 'feature_names': ('3', '6', '9', '12')}, 5: {'feature_idx': (3, 6, 9, 10, 12), 'cv_scores': array([ 0.92307692, 0.96 , 1. , 1. , 1. ]), 'avg_score': 0.97661538461538466, 'feature_names': ('3', '6', '9', '10', '12')}, 6: {'feature_idx': (2, 3, 6, 9, 10, 12), 'cv_scores': array([ 0.92307692, 0.96 , 1. , 0.95833333, 1.
]), 'avg_score': 0.96828205128205125, 'feature_names': ('2', '3', '6', '9', '10', '12')}, 7: {'feature_idx': (0, 2, 3, 6, 9, 10, 12), 'cv_scores': array([ 0.92307692, 0.92 , 1. , 1. , 1. ]), 'avg_score': 0.96861538461538466, 'feature_names': ('0', '2', '3', '6', '9', '10', '12')}, 8: {'feature_idx': (0, 2, 3, 6, 8, 9, 10, 12), 'cv_scores': array([ 1. , 0.92, 1. , 1. , 1. ]), 'avg_score': 0.98399999999999999, 'feature_names': ('0', '2', '3', '6', '8', '9', '10', '12')}, 9: {'feature_idx': (0, 2, 3, 6, 8, 9, 10, 11, 12), 'cv_scores': array([ 1. , 0.92, 1. , 1. , 1. ]), 'avg_score': 0.98399999999999999, 'feature_names': ('0', '2', '3', '6', '8', '9', '10', '11', '12')}, 10: {'feature_idx': (0, 1, 2, 3, 6, 8, 9, 10, 11, 12), 'cv_scores': array([ 1. , 0.96, 1. , 1. , 1. ]), 'avg_score': 0.99199999999999999, 'feature_names': ('0', '1', '2', '3', '6', '8', '9', '10', '11', '12')}} Example 10 -- Using other cross-validation schemes In addition to standard k-fold and stratified k-fold, other cross-validation schemes can be used with SequentialFeatureSelector . For example, GroupKFold or LeaveOneOut cross-validation from scikit-learn. Using GroupKFold with SequentialFeatureSelector from mlxtend.feature_selection import SequentialFeatureSelector as SFS from sklearn.neighbors import KNeighborsClassifier from mlxtend.data import iris_data from sklearn.model_selection import GroupKFold import numpy as np X, y = iris_data() groups = np.arange(len(y)) // 10 print('groups: {}'.format(groups)) groups: [ 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5 5 6 6 6 6 6 6 6 6 6 6 7 7 7 7 7 7 7 7 7 7 8 8 8 8 8 8 8 8 8 8 9 9 9 9 9 9 9 9 9 9 10 10 10 10 10 10 10 10 10 10 11 11 11 11 11 11 11 11 11 11 12 12 12 12 12 12 12 12 12 12 13 13 13 13 13 13 13 13 13 13 14 14 14 14 14 14 14 14 14 14] Calling the split() method of a scikit-learn cross-validator object will return a generator that yields train, test splits. cv_gen = GroupKFold(4).split(X, y, groups) cv_gen The cv parameter of SequentialFeatureSelector must be either an int or an iterable yielding train, test splits. This iterable can be constructed by passing the train, test split generator to the built-in list() function. cv = list(cv_gen) knn = KNeighborsClassifier(n_neighbors=2) sfs = SFS(estimator=knn, k_features=2, scoring='accuracy', cv=cv) sfs.fit(X, y) print('best combination (ACC: %.3f): %s\n' % (sfs.k_score_, sfs.k_feature_idx_)) best combination (ACC: 0.940): (2, 3) Example 11 - Working with pandas DataFrames Optionally, we can also use pandas DataFrames and pandas Series as input to the fit function. In this case, the column names of the pandas DataFrame will be used as feature names. However, note that if custom_feature_names are provided in the fit function, these custom_feature_names take precedence over the DataFrame column-based feature names.
import pandas as pd from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from mlxtend.feature_selection import SequentialFeatureSelector as SFS iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=4) sfs1 = SFS(knn, k_features=3, forward=True, floating=False, scoring='accuracy', cv=0) X_df = pd.DataFrame(X, columns=['sepal len', 'petal len', 'sepal width', 'petal width']) X_df.head() sepal len petal len sepal width petal width 0 5.1 3.5 1.4 0.2 1 4.9 3.0 1.4 0.2 2 4.7 3.2 1.3 0.2 3 4.6 3.1 1.5 0.2 4 5.0 3.6 1.4 0.2 Also, the target array, y , can optionally be cast as a Series: y_series = pd.Series(y) y_series.head() 0 0 1 0 2 0 3 0 4 0 dtype: int64 sfs1 = sfs1.fit(X_df, y_series) Note that the only difference when passing a pandas DataFrame as input is that the sfs1.subsets_ dictionary will now contain the DataFrame column names as 'feature_names' : sfs1.subsets_ {1: {'avg_score': 0.95999999999999996, 'cv_scores': array([ 0.96]), 'feature_idx': (3,), 'feature_names': ('petal width',)}, 2: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (2, 3), 'feature_names': ('sepal width', 'petal width')}, 3: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (1, 2, 3), 'feature_names': ('petal len', 'sepal width', 'petal width')}} In mlxtend version >= 0.13, pandas DataFrames are supported as feature inputs to the SequentialFeatureSelector in addition to NumPy arrays and other NumPy-like array types. API SequentialFeatureSelector(estimator, k_features=1, forward=True, floating=False, verbose=0, scoring=None, cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Sequential Feature Selection for Classification and Regression. Parameters estimator : scikit-learn classifier or regressor k_features : int or tuple or str (default: 1) Number of features to select, where k_features < the full feature set. New in 0.4.2: A tuple containing a min and max value can be provided, and the SFS will return any feature combination between min and max that scored highest in cross-validation. For example, the tuple (1, 4) will return any combination from 1 up to 4 features instead of a fixed number of features k. New in 0.8.0: A string argument \"best\" or \"parsimonious\". If \"best\" is provided, the feature selector will return the feature subset with the best cross-validation performance. If \"parsimonious\" is provided as an argument, the smallest feature subset that is within one standard error of the cross-validation performance will be selected. forward : bool (default: True) Forward selection if True, backward selection otherwise floating : bool (default: False) Adds a conditional exclusion/inclusion if True. verbose : int (default: 0), level of verbosity to use in logging. If 0, no output; if 1, the number of features in the current set; if 2, detailed logging including timestamp and cv scores at each step. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors. If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors.
If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. cv : int (default: 5) Integer or iterable yielding train, test splits. If cv is an integer and the estimator is a classifier (or y consists of integer class labels), stratified k-fold cross-validation is performed; otherwise, regular k-fold cross-validation is used. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs An int, giving the exact number of total jobs that are spawned A string, giving an expression as a function of n_jobs, as in 2*n_jobs clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1. Attributes k_feature_idx_ : array-like, shape = [n_predictions] Feature Indices of the selected feature subsets. k_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. New in v 0.13.0. k_score_ : float Cross-validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the sequential selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. The 'feature_names' entry is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/SequentialFeatureSelector/ Methods fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: a pandas Series is now also accepted as argument for y. custom_feature_names : None or tuple (default: tuple) Custom feature names for self.k_feature_names and self.subsets_[i]['feature_names'] .
(new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data then reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: a pandas Series is now also accepted as argument for y. fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns Reduced feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': average of the CV scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Reduced feature subset of X, shape={n_samples, k_features}","title":"Sequential Feature Selector"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#sequential-feature-selector","text":"Implementation of sequential feature algorithms (SFAs) -- greedy search algorithms -- that have been developed as a suboptimal solution to the computationally often not feasible exhaustive search. from mlxtend.feature_selection import SequentialFeatureSelector","title":"Sequential Feature Selector"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#overview","text":"Sequential feature selection algorithms are a family of greedy search algorithms that are used to reduce an initial d -dimensional feature space to a k -dimensional feature subspace where k < d . The motivation behind feature selection algorithms is to automatically select a subset of features that is most relevant to the problem. The goal of feature selection is two-fold: We want to improve the computational efficiency and reduce the generalization error of the model by removing irrelevant features or noise.
A wrapper approach such as sequential feature selection is especially useful if embedded feature selection -- for example, a regularization penalty like LASSO -- is not applicable. In a nutshell, SFAs remove or add one feature at a time based on the classifier performance until a feature subset of the desired size k is reached. There are 4 different flavors of SFAs available via the SequentialFeatureSelector : Sequential Forward Selection (SFS) Sequential Backward Selection (SBS) Sequential Forward Floating Selection (SFFS) Sequential Backward Floating Selection (SBFS) The floating variants, SFFS and SBFS, can be considered as extensions to the simpler SFS and SBS algorithms. The floating algorithms have an additional exclusion or inclusion step to remove features once they were included (or excluded), so that a larger number of feature subset combinations can be sampled. It is important to emphasize that this step is conditional and only occurs if the resulting feature subset is assessed as \"better\" by the criterion function after removal (or addition) of a particular feature. Furthermore, I added an optional check to skip the conditional exclusion steps if the algorithm gets stuck in cycles. How is this different from Recursive Feature Elimination (RFE) -- e.g., as implemented in sklearn.feature_selection.RFE ? RFE is computationally less complex: it uses the feature weight coefficients (e.g., of linear models) or feature importances (of tree-based algorithms) to eliminate features recursively, whereas SFAs eliminate (or add) features based on a user-defined classifier/regression performance metric. The SFAs are outlined in pseudo code below:","title":"Overview"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#sequential-forward-selection-sfs","text":"Input: Y = \{y_1, y_2, ..., y_d\} The SFS algorithm takes the whole d -dimensional feature set as input. Output: X_k = \{x_j \; | \;j = 1, 2, ..., k; \; x_j \in Y\} , where k = (0, 1, 2, ..., d) SFS returns a subset of features; the number of selected features k , where k < d , has to be specified a priori . Initialization: X_0 = \emptyset , k = 0 We initialize the algorithm with an empty set \emptyset (\"null set\") so that k = 0 (where k is the size of the subset). Step 1 (Inclusion): x^+ = \text{ arg max } J(X_k + x), \text{ where } x \in Y - X_k X_{k+1} = X_k + x^+ k = k + 1 Go to Step 1 In this step, we add an additional feature, x^+ , to our feature subset X_k . x^+ is the feature that maximizes our criterion function, that is, the feature that is associated with the best classifier performance if it is added to X_k . We repeat this procedure until the termination criterion is satisfied. Termination: k = p We add features from the feature subset X_k until the feature subset of size k contains the number of desired features p that we specified a priori .","title":"Sequential Forward Selection (SFS)"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#sequential-backward-selection-sbs","text":"Input: the set of all features, Y = \{y_1, y_2, ..., y_d\} The SBS algorithm takes the whole feature set as input. Output: X_k = \{x_j \; | \;j = 1, 2, ..., k; \; x_j \in Y\} , where k = (0, 1, 2, ..., d) SBS returns a subset of features; the number of selected features k , where k < d , has to be specified a priori . Initialization: X_0 = Y , k = d We initialize the algorithm with the given feature set so that k = d .
Step 1 (Exclusion): x^- = \text{ arg max } J(X_k - x), \text{ where } x \in X_k X_{k-1} = X_k - x^- k = k - 1 Go to Step 1 In this step, we remove a feature, x^- , from our feature subset X_k . x^- is the feature that maximizes our criterion function upon removal, that is, the feature that is associated with the best classifier performance if it is removed from X_k . We repeat this procedure until the termination criterion is satisfied. Termination: k = p We remove features from the feature subset X_k until the feature subset of size k contains the number of desired features p that we specified a priori .","title":"Sequential Backward Selection (SBS)"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#sequential-backward-floating-selection-sbfs","text":"Input: the set of all features, Y = \{y_1, y_2, ..., y_d\} The SBFS algorithm takes the whole feature set as input. Output: X_k = \{x_j \; | \;j = 1, 2, ..., k; \; x_j \in Y\} , where k = (0, 1, 2, ..., d) SBFS returns a subset of features; the number of selected features k , where k < d , has to be specified a priori . Initialization: X_0 = Y , k = d We initialize the algorithm with the given feature set so that k = d . Step 1 (Exclusion): x^- = \text{ arg max } J(X_k - x), \text{ where } x \in X_k X_{k-1} = X_k - x^- k = k - 1 Go to Step 2 In this step, we remove a feature, x^- , from our feature subset X_k . x^- is the feature that maximizes our criterion function upon removal, that is, the feature that is associated with the best classifier performance if it is removed from X_k . Step 2 (Conditional Inclusion): x^+ = \text{ arg max } J(X_k + x), \text{ where } x \in Y - X_k if J(X_k + x^+) > J(X_k) : X_{k+1} = X_k + x^+ k = k + 1 Go to Step 1 In Step 2, we search for features that improve the classifier performance if they are added back to the feature subset. If such features exist, we add the feature x^+ for which the performance improvement is maximized. If k = 2 or an improvement cannot be made (i.e., such feature x^+ cannot be found), go back to step 1; else, repeat this step. Termination: k = p We remove features from the feature subset X_k until the feature subset of size k contains the number of desired features p that we specified a priori .","title":"Sequential Backward Floating Selection (SBFS)"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#sequential-forward-floating-selection-sffs","text":"Input: the set of all features, Y = \{y_1, y_2, ..., y_d\} The SFFS algorithm takes the whole feature set as input; for example, a feature space that consists of 10 dimensions ( d = 10 ). Output: a subset of features, X_k = \{x_j \; | \;j = 1, 2, ..., k; \; x_j \in Y\} , where k = (0, 1, 2, ..., d) The returned output of the algorithm is a subset of the feature space of a specified size. E.g., a subset of 5 features from a 10-dimensional feature space ( k = 5, d = 10 ).
Initialization: X_0 = \emptyset , k = 0 We initialize the algorithm with an empty set (\"null set\") so that k = 0 (where k is the size of the subset) Step 1 (Inclusion): x^+ = \text{ arg max } J(X_k + x), \text{ where } x \in Y - X_k X_{k+1} = X_k + x^+ k = k + 1 Go to Step 2 Step 2 (Conditional Exclusion): x^- = \text{ arg max } J(X_k - x), \text{ where } x \in X_k if \; J(X_k - x^-) > J(X_k) : X_{k-1} = X_k - x^- k = k - 1 Go to Step 1 In step 1, we include the feature from the feature space that leads to the best performance increase for our feature subset (assessed by the criterion function ). Then, we go over to step 2. In step 2, we only remove a feature if the resulting subset would gain an increase in performance. If k = 2 or an improvement cannot be made (i.e., such feature x^- cannot be found), go back to step 1; else, repeat this step. Steps 1 and 2 are repeated until the Termination criterion is reached. Termination: stop when k equals the number of desired features","title":"Sequential Forward Floating Selection (SFFS)"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#references","text":"Ferri, F. J., Pudil P., Hatef, M., Kittler, J. (1994). \"Comparative study of techniques for large-scale feature selection.\" Pattern Recognition in Practice IV : 403-413. Pudil, P., Novovi\u010dov\u00e1, J., & Kittler, J. (1994). \"Floating search methods in feature selection.\" Pattern Recognition Letters 15.11: 1119-1125.","title":"References"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-1-a-simple-sequential-forward-selection-example","text":"Initializing a simple classifier from scikit-learn: from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=4) We start by selecting the \"best\" 3 features from the Iris dataset via Sequential Forward Selection (SFS). Here, we set forward=True and floating=False . By choosing cv=0 , we don't perform any cross-validation; therefore, the performance (here: 'accuracy' ) is computed entirely on the training set. from mlxtend.feature_selection import SequentialFeatureSelector as SFS sfs1 = SFS(knn, k_features=3, forward=True, floating=False, verbose=2, scoring='accuracy', cv=0) sfs1 = sfs1.fit(X, y) [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 1/3 -- score: 0.96[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 2/3 -- score: 0.973333333333[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 3/3 -- score: 0.973333333333 Via the subsets_ attribute, we can take a look at the selected feature indices at each step: sfs1.subsets_ {1: {'avg_score': 0.95999999999999996, 'cv_scores': array([ 0.96]), 'feature_idx': (3,), 'feature_names': ('3',)}, 2: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (2, 3), 'feature_names': ('2', '3')}, 3: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (1, 2, 3), 'feature_names': ('1', '2', '3')}} Note that the 'feature_names' entry is simply a string representation of the 'feature_idx' in this case.
Optionally, we can provide custom feature names via the fit method's custom_feature_names parameter: feature_names = ('sepal length', 'sepal width', 'petal length', 'petal width') sfs1 = sfs1.fit(X, y, custom_feature_names=feature_names) sfs1.subsets_ [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 1/3 -- score: 0.96[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 2/3 -- score: 0.973333333333[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.0s finished [2018-05-06 12:49:16] Features: 3/3 -- score: 0.973333333333 {1: {'avg_score': 0.95999999999999996, 'cv_scores': array([ 0.96]), 'feature_idx': (3,), 'feature_names': ('petal width',)}, 2: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (2, 3), 'feature_names': ('petal length', 'petal width')}, 3: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (1, 2, 3), 'feature_names': ('sepal width', 'petal length', 'petal width')}} Furthermore, we can access the indices of the 3 best features directly via the k_feature_idx_ attribute: sfs1.k_feature_idx_ (1, 2, 3) And similarly, to obtain the names of these features, given that we provided an argument to the custom_feature_names parameter, we can refer to the sfs1.k_feature_names_ attribute: sfs1.k_feature_names_ ('sepal width', 'petal length', 'petal width') Finally, the prediction score for these 3 features can be accessed via k_score_ : sfs1.k_score_ 0.97333333333333338","title":"Example 1 - A simple Sequential Forward Selection example"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-2-toggling-between-sfs-sbs-sffs-and-sbfs","text":"Using the forward and floating parameters, we can toggle between SFS, SBS, SFFS, and SBFS as shown below. Note that we are performing (stratified) 4-fold cross-validation for more robust estimates in contrast to Example 1. Via n_jobs=-1 , we choose to run the cross-validation on all our available CPU cores.
# Sequential Forward Selection sfs = SFS(knn, k_features=3, forward=True, floating=False, scoring='accuracy', cv=4, n_jobs=-1) sfs = sfs.fit(X, y) print('\\nSequential Forward Selection (k=3):') print(sfs.k_feature_idx_) print('CV Score:') print(sfs.k_score_) ################################################### # Sequential Backward Selection sbs = SFS(knn, k_features=3, forward=False, floating=False, scoring='accuracy', cv=4, n_jobs=-1) sbs = sbs.fit(X, y) print('\\nSequential Backward Selection (k=3):') print(sbs.k_feature_idx_) print('CV Score:') print(sbs.k_score_) ################################################### # Sequential Forward Floating Selection sffs = SFS(knn, k_features=3, forward=True, floating=True, scoring='accuracy', cv=4, n_jobs=-1) sffs = sffs.fit(X, y) print('\\nSequential Forward Floating Selection (k=3):') print(sffs.k_feature_idx_) print('CV Score:') print(sffs.k_score_) ################################################### # Sequential Backward Floating Selection sbfs = SFS(knn, k_features=3, forward=False, floating=True, scoring='accuracy', cv=4, n_jobs=-1) sbfs = sbfs.fit(X, y) print('\\nSequential Backward Floating Selection (k=3):') print(sbfs.k_feature_idx_) print('CV Score:') print(sbfs.k_score_) Sequential Forward Selection (k=3): (1, 2, 3) CV Score: 0.972756410256 Sequential Backward Selection (k=3): (1, 2, 3) CV Score: 0.972756410256 Sequential Forward Floating Selection (k=3): (1, 2, 3) CV Score: 0.972756410256 Sequential Backward Floating Selection (k=3): (1, 2, 3) CV Score: 0.972756410256 In this simple scenario, selecting the best 3 features out of the 4 available features in the Iris dataset, we end up with similar results regardless of which sequential selection algorithm we used.","title":"Example 2 - Toggling between SFS, SBS, SFFS, and SBFS"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-3-visualizing-the-results-in-dataframes","text":"For our convenience, we can visualize the output from the feature selection in a pandas DataFrame format using the get_metric_dict method of the SequentialFeatureSelector object. The columns std_dev and std_err represent the standard deviation and standard error of the cross-validation scores, respectively. Below, we see the DataFrame of the Sequential Forward Selector from Example 2: import pandas as pd pd.DataFrame.from_dict(sfs.get_metric_dict()).T avg_score ci_bound cv_scores feature_idx feature_names std_dev std_err 1 0.952991 0.0660624 [0.974358974359, 0.948717948718, 0.88888888888... (3,) (3,) 0.0412122 0.0237939 2 0.959936 0.0494801 [0.974358974359, 0.948717948718, 0.91666666666... (2, 3) (2, 3) 0.0308676 0.0178214 3 0.972756 0.0315204 [0.974358974359, 1.0, 0.944444444444, 0.972222... (1, 2, 3) (1, 2, 3) 0.0196636 0.0113528 Now, let's compare it to the Sequential Backward Selector: pd.DataFrame.from_dict(sbs.get_metric_dict()).T avg_score ci_bound cv_scores feature_idx feature_names std_dev std_err 3 0.972756 0.0315204 [0.974358974359, 1.0, 0.944444444444, 0.972222... (1, 2, 3) (1, 2, 3) 0.0196636 0.0113528 4 0.952991 0.0372857 [0.974358974359, 0.948717948718, 0.91666666666...
(0, 1, 2, 3) (0, 1, 2, 3) 0.0232602 0.0134293 We can see that both SFS and SBS found the same \"best\" 3 features; however, the intermediate steps were obviously different. The ci_bound column in the DataFrames above represents the confidence interval around the computed cross-validation scores. By default, a confidence interval of 95% is used, but we can use different confidence bounds via the confidence_interval parameter. E.g., the confidence bounds for a 90% confidence interval can be obtained as follows: pd.DataFrame.from_dict(sbs.get_metric_dict(confidence_interval=0.90)).T avg_score ci_bound cv_scores feature_idx feature_names std_dev std_err 3 0.972756 0.0242024 [0.974358974359, 1.0, 0.944444444444, 0.972222... (1, 2, 3) (1, 2, 3) 0.0196636 0.0113528 4 0.952991 0.0286292 [0.974358974359, 0.948717948718, 0.91666666666... (0, 1, 2, 3) (0, 1, 2, 3) 0.0232602 0.0134293","title":"Example 3 - Visualizing the results in DataFrames"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-4-plotting-the-results","text":"After importing the little helper function plotting.plot_sequential_feature_selection , we can also visualize the results using matplotlib figures. from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs import matplotlib.pyplot as plt sfs = SFS(knn, k_features=4, forward=True, floating=False, scoring='accuracy', verbose=2, cv=5) sfs = sfs.fit(X, y) fig1 = plot_sfs(sfs.get_metric_dict(), kind='std_dev') plt.ylim([0.8, 1]) plt.title('Sequential Forward Selection (w. StdDev)') plt.grid() plt.show() [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished [2018-05-06 12:49:18] Features: 1/4 -- score: 0.96[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished [2018-05-06 12:49:18] Features: 2/4 -- score: 0.966666666667[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.0s finished [2018-05-06 12:49:18] Features: 3/4 -- score: 0.953333333333[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s finished [2018-05-06 12:49:18] Features: 4/4 -- score: 0.973333333333","title":"Example 4 - Plotting the results"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-5-sequential-feature-selection-for-regression","text":"Similar to the classification examples above, the SequentialFeatureSelector also supports scikit-learn's estimators for regression. from sklearn.linear_model import LinearRegression from sklearn.datasets import load_boston boston = load_boston() X, y = boston.data, boston.target lr = LinearRegression() sfs = SFS(lr, k_features=13, forward=True, floating=False, scoring='neg_mean_squared_error', cv=10) sfs = sfs.fit(X, y) fig = plot_sfs(sfs.get_metric_dict(), kind='std_err') plt.title('Sequential Forward Selection (w.
StdErr)') plt.grid() plt.show()","title":"Example 5 - Sequential Feature Selection for Regression"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-6-feature-selection-with-fixed-trainvalidation-splits","text":"If you do not wish to use cross-validation (here: k-fold cross-validation, i.e., rotating training and validation folds), you can use the PredefinedHoldoutSplit class to specify your own, fixed training and validation split. from sklearn.datasets import load_iris from mlxtend.evaluate import PredefinedHoldoutSplit import numpy as np iris = load_iris() X = iris.data y = iris.target rng = np.random.RandomState(123) my_validation_indices = rng.permutation(np.arange(150))[:30] print(my_validation_indices) [ 72 112 132 88 37 138 87 42 8 90 141 33 59 116 135 104 36 13 63 45 28 133 24 127 46 20 31 121 117 4] from sklearn.neighbors import KNeighborsClassifier from mlxtend.feature_selection import SequentialFeatureSelector as SFS knn = KNeighborsClassifier(n_neighbors=4) piter = PredefinedHoldoutSplit(my_validation_indices) sfs1 = SFS(knn, k_features=3, forward=True, floating=False, verbose=2, scoring='accuracy', cv=piter) sfs1 = sfs1.fit(X, y) [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.0s finished [2018-09-24 02:31:21] Features: 1/3 -- score: 0.9666666666666667[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.0s finished [2018-09-24 02:31:21] Features: 2/3 -- score: 0.9666666666666667[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.0s finished [2018-09-24 02:31:21] Features: 3/3 -- score: 0.9666666666666667","title":"Example 6 -- Feature Selection with Fixed Train/Validation Splits"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-7-using-the-selected-feature-subset-for-making-new-predictions","text":"# Initialize the dataset from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=1) knn = KNeighborsClassifier(n_neighbors=4) # Select the \"best\" three features via # 5-fold cross-validation on the training set. 
from mlxtend.feature_selection import SequentialFeatureSelector as SFS sfs1 = SFS(knn, k_features=3, forward=True, floating=False, scoring='accuracy', cv=5) sfs1 = sfs1.fit(X_train, y_train) print('Selected features:', sfs1.k_feature_idx_) Selected features: (1, 2, 3) # Generate the new subsets based on the selected features # Note that the transform call is equivalent to # X_train[:, sfs1.k_feature_idx_] X_train_sfs = sfs1.transform(X_train) X_test_sfs = sfs1.transform(X_test) # Fit the estimator using the new feature subset # and make a prediction on the test data knn.fit(X_train_sfs, y_train) y_pred = knn.predict(X_test_sfs) # Compute the accuracy of the prediction acc = float((y_test == y_pred).sum()) / y_pred.shape[0] print('Test set accuracy: %.2f %%' % (acc * 100)) Test set accuracy: 96.00 %","title":"Example 7 -- Using the Selected Feature Subset For Making New Predictions"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-8-sequential-feature-selection-and-gridsearch","text":"# Initialize the dataset from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() X, y = iris.data, iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=1) Use scikit-learn's GridSearch to tune the hyperparameters inside and outside the SequentialFeatureSelector : from sklearn.model_selection import GridSearchCV from sklearn.pipeline import Pipeline from mlxtend.feature_selection import SequentialFeatureSelector as SFS import mlxtend knn = KNeighborsClassifier(n_neighbors=2) sfs1 = SFS(estimator=knn, k_features=3, forward=True, floating=False, scoring='accuracy', cv=5) pipe = Pipeline([('sfs', sfs1), ('knn', knn)]) param_grid = [ {'sfs__k_features': [1, 2, 3, 4], 'sfs__estimator__n_neighbors': [1, 2, 3, 4]} ] gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', n_jobs=1, cv=5, refit=False) # run gridsearch gs = gs.fit(X_train, y_train) ... and the \"best\" parameters determined by GridSearch are ... print(\"Best parameters via GridSearch\", gs.best_params_) Best parameters via GridSearch {'sfs__estimator__n_neighbors': 1, 'sfs__k_features': 3}","title":"Example 8 -- Sequential Feature Selection and GridSearch"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#obtaining-the-best-k-feature-indices-after-gridsearch","text":"If we are interested in the best k feature indices via SequentialFeatureSelector.k_feature_idx_ , we have to initialize a GridSearchCV object with refit=True . Now, the grid search object will take the complete training dataset and the best parameters, which it found via cross-validation, to train the estimator pipeline. gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', n_jobs=1, cv=5, refit=True) gs = gs.fit(X_train, y_train) After running the grid search, we can access the individual pipeline objects of the best_estimator_ via the steps attribute.
gs.best_estimator_.steps [('sfs', SequentialFeatureSelector(clone_estimator=True, cv=5, estimator=KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=1, n_neighbors=1, p=2, weights='uniform'), floating=False, forward=True, k_features=3, n_jobs=1, pre_dispatch='2*n_jobs', scoring='accuracy', verbose=0)), ('knn', KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=1, n_neighbors=2, p=2, weights='uniform'))] Via sub-indexing, we can then obtain the best-selected feature subset: print('Best features:', gs.best_estimator_.steps[0][1].k_feature_idx_) Best features: (0, 1, 3) During cross-validation, this feature combination had a CV accuracy of: print('Best score:', gs.best_score_) Best score: 0.94 gs.best_params_ {'sfs__estimator__n_neighbors': 1, 'sfs__k_features': 3} Alternatively, we can set the \"best grid search parameters\" in our pipeline manually if we ran GridSearchCV with refit=False . It should yield the same results: pipe.set_params(**gs.best_params_).fit(X_train, y_train) print('Best features:', pipe.steps[0][1].k_feature_idx_) Best features: (0, 1, 3)","title":"Obtaining the best k feature indices after GridSearch"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-9-selecting-the-best-feature-combination-in-a-k-range","text":"If k_features is set to a tuple (min_k, max_k) (new in 0.4.2), the SFS will now select the best feature combination that it discovered by iterating from k=1 to max_k (forward), or max_k to min_k (backward). The size of the returned feature subset is then within min_k and max_k , depending on which combination scored best during cross-validation. X.shape (150, 4) from mlxtend.feature_selection import SequentialFeatureSelector as SFS from sklearn.neighbors import KNeighborsClassifier from mlxtend.data import wine_data from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.pipeline import make_pipeline X, y = wine_data() X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.3, random_state=1) knn = KNeighborsClassifier(n_neighbors=2) sfs1 = SFS(estimator=knn, k_features=(3, 10), forward=True, floating=False, scoring='accuracy', cv=5) pipe = make_pipeline(StandardScaler(), sfs1) pipe.fit(X_train, y_train) print('best combination (ACC: %.3f): %s\\n' % (sfs1.k_score_, sfs1.k_feature_idx_)) print('all subsets:\\n', sfs1.subsets_) plot_sfs(sfs1.get_metric_dict(), kind='std_err'); best combination (ACC: 0.992): (0, 1, 2, 3, 6, 8, 9, 10, 11, 12) all subsets: {1: {'feature_idx': (6,), 'cv_scores': array([ 0.84615385, 0.6 , 0.88 , 0.79166667, 0.875 ]), 'avg_score': 0.7985641025641026, 'feature_names': ('6',)}, 2: {'feature_idx': (6, 9), 'cv_scores': array([ 0.92307692, 0.88 , 1. , 0.95833333, 0.91666667]), 'avg_score': 0.93561538461538463, 'feature_names': ('6', '9')}, 3: {'feature_idx': (6, 9, 12), 'cv_scores': array([ 0.92307692, 0.92 , 0.96 , 1. , 0.95833333]), 'avg_score': 0.95228205128205123, 'feature_names': ('6', '9', '12')}, 4: {'feature_idx': (3, 6, 9, 12), 'cv_scores': array([ 0.96153846, 0.96 , 0.96 , 1. , 0.95833333]), 'avg_score': 0.96797435897435891, 'feature_names': ('3', '6', '9', '12')}, 5: {'feature_idx': (3, 6, 9, 10, 12), 'cv_scores': array([ 0.92307692, 0.96 , 1. , 1. , 1.
]), 'avg_score': 0.97661538461538466, 'feature_names': ('3', '6', '9', '10', '12')}, 6: {'feature_idx': (2, 3, 6, 9, 10, 12), 'cv_scores': array([ 0.92307692, 0.96 , 1. , 0.95833333, 1. ]), 'avg_score': 0.96828205128205125, 'feature_names': ('2', '3', '6', '9', '10', '12')}, 7: {'feature_idx': (0, 2, 3, 6, 9, 10, 12), 'cv_scores': array([ 0.92307692, 0.92 , 1. , 1. , 1. ]), 'avg_score': 0.96861538461538466, 'feature_names': ('0', '2', '3', '6', '9', '10', '12')}, 8: {'feature_idx': (0, 2, 3, 6, 8, 9, 10, 12), 'cv_scores': array([ 1. , 0.92, 1. , 1. , 1. ]), 'avg_score': 0.98399999999999999, 'feature_names': ('0', '2', '3', '6', '8', '9', '10', '12')}, 9: {'feature_idx': (0, 2, 3, 6, 8, 9, 10, 11, 12), 'cv_scores': array([ 1. , 0.92, 1. , 1. , 1. ]), 'avg_score': 0.98399999999999999, 'feature_names': ('0', '2', '3', '6', '8', '9', '10', '11', '12')}, 10: {'feature_idx': (0, 1, 2, 3, 6, 8, 9, 10, 11, 12), 'cv_scores': array([ 1. , 0.96, 1. , 1. , 1. ]), 'avg_score': 0.99199999999999999, 'feature_names': ('0', '1', '2', '3', '6', '8', '9', '10', '11', '12')}}","title":"Example 9 -- Selecting the \"best\" feature combination in a k-range"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-10-using-other-cross-validation-schemes","text":"In addition to standard k-fold and stratified k-fold, other cross validation schemes can be used with SequentialFeatureSelector . For example, GroupKFold or LeaveOneOut cross-validation from scikit-learn.","title":"Example 10 -- Using other cross-validation schemes"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#using-groupkfold-with-sequentialfeatureselector","text":"from mlxtend.feature_selection import SequentialFeatureSelector as SFS from sklearn.neighbors import KNeighborsClassifier from mlxtend.data import iris_data from sklearn.model_selection import GroupKFold import numpy as np X, y = iris_data() groups = np.arange(len(y)) // 10 print('groups: {}'.format(groups)) groups: [ 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5 5 6 6 6 6 6 6 6 6 6 6 7 7 7 7 7 7 7 7 7 7 8 8 8 8 8 8 8 8 8 8 9 9 9 9 9 9 9 9 9 9 10 10 10 10 10 10 10 10 10 10 11 11 11 11 11 11 11 11 11 11 12 12 12 12 12 12 12 12 12 12 13 13 13 13 13 13 13 13 13 13 14 14 14 14 14 14 14 14 14 14] Calling the split() method of a scikit-learn cross-validator object will return a generator that yields train, test splits. cv_gen = GroupKFold(4).split(X, y, groups) cv_gen The cv parameter of SequentialFeatureSelector must be either an int or an iterable yielding train, test splits. This iterable can be constructed by passing the train, test split generator to the built-in list() function. cv = list(cv_gen) knn = KNeighborsClassifier(n_neighbors=2) sfs = SFS(estimator=knn, k_features=2, scoring='accuracy', cv=cv) sfs.fit(X, y) print('best combination (ACC: %.3f): %s\\n' % (sfs.k_score_, sfs.k_feature_idx_)) best combination (ACC: 0.940): (2, 3)","title":"Using GroupKFold with SequentialFeatureSelector"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-11-working-with-pandas-dataframes","text":"","title":"Example 11 - Working with pandas DataFrames"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#example-12-using-pandas-dataframes","text":"Optionally, we can also use pandas DataFrames and pandas Series as input to the fit function. In this case, the column names of the pandas DataFrame will be used as feature names. 
However, note that if custom_feature_names are provided in the fit function, these custom_feature_names take precedence over the DataFrame column-based feature names. import pandas as pd from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris from mlxtend.feature_selection import SequentialFeatureSelector as SFS iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=4) sfs1 = SFS(knn, k_features=3, forward=True, floating=False, scoring='accuracy', cv=0) X_df = pd.DataFrame(X, columns=['sepal len', 'petal len', 'sepal width', 'petal width']) X_df.head() sepal len petal len sepal width petal width 0 5.1 3.5 1.4 0.2 1 4.9 3.0 1.4 0.2 2 4.7 3.2 1.3 0.2 3 4.6 3.1 1.5 0.2 4 5.0 3.6 1.4 0.2 Also, the target array, y , can optionally be cast as a Series: y_series = pd.Series(y) y_series.head() 0 0 1 0 2 0 3 0 4 0 dtype: int64 sfs1 = sfs1.fit(X_df, y_series) Note that the only difference when passing a pandas DataFrame as input is that the sfs1.subsets_ dictionary will now show the DataFrame column names in its 'feature_names' entries: sfs1.subsets_ {1: {'avg_score': 0.95999999999999996, 'cv_scores': array([ 0.96]), 'feature_idx': (3,), 'feature_names': ('petal width',)}, 2: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (2, 3), 'feature_names': ('sepal width', 'petal width')}, 3: {'avg_score': 0.97333333333333338, 'cv_scores': array([ 0.97333333]), 'feature_idx': (1, 2, 3), 'feature_names': ('petal len', 'sepal width', 'petal width')}} In mlxtend version >= 0.13, pandas DataFrames are supported as feature inputs to the SequentialFeatureSelector in addition to NumPy arrays and other NumPy-like array types.","title":"Example 12 - Using Pandas DataFrames"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#api","text":"SequentialFeatureSelector(estimator, k_features=1, forward=True, floating=False, verbose=0, scoring=None, cv=5, n_jobs=1, pre_dispatch='2*n_jobs', clone_estimator=True) Sequential Feature Selection for Classification and Regression. Parameters estimator : scikit-learn classifier or regressor k_features : int or tuple or str (default: 1) Number of features to select, where k_features < the full feature set. New in 0.4.2: A tuple containing a min and max value can be provided, and the SFS will consider any feature combination between min and max and return the one that scored highest in cross-validation. For example, the tuple (1, 4) will return any combination from 1 up to 4 features instead of a fixed number of features k. New in 0.8.0: A string argument \"best\" or \"parsimonious\". If \"best\" is provided, the feature selector will return the feature subset with the best cross-validation performance. If \"parsimonious\" is provided as an argument, the smallest feature subset that is within one standard error of the cross-validation performance will be selected. forward : bool (default: True) Forward selection if True, backward selection otherwise floating : bool (default: False) Adds a conditional exclusion/inclusion if True. verbose : int (default: 0), level of verbosity to use in logging. If 0, no output, if 1 number of features in current set, if 2 detailed logging including timestamp and cv scores at step. scoring : str, callable, or None (default: None) If None (default), uses 'accuracy' for sklearn classifiers and 'r2' for sklearn regressors.
If str, uses a sklearn scoring metric string identifier, for example {accuracy, f1, precision, recall, roc_auc} for classifiers, {'mean_absolute_error', 'mean_squared_error'/'neg_mean_squared_error', 'median_absolute_error', 'r2'} for regressors. If a callable object or function is provided, it has to conform to sklearn's signature scorer(estimator, X, y) ; see http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html for more information. cv : int (default: 5) Integer or iterable yielding train, test splits. If cv is an integer and estimator is a classifier (or y consists of integer class labels), stratified k-fold cross-validation is performed. Otherwise, regular k-fold cross-validation is performed. No cross-validation if cv is None, False, or 0. n_jobs : int (default: 1) The number of CPUs to use for evaluating different feature subsets in parallel. -1 means 'all CPUs'. pre_dispatch : int, or string (default: '2*n_jobs') Controls the number of jobs that get dispatched during parallel execution if n_jobs > 1 or n_jobs=-1 . Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs. An int, giving the exact number of total jobs that are spawned. A string, giving an expression as a function of n_jobs, as in 2*n_jobs . clone_estimator : bool (default: True) Clones estimator if True; works with the original estimator instance if False. Set to False if the estimator doesn't implement scikit-learn's set_params and get_params methods. In addition, it is required to set cv=0, and n_jobs=1. Attributes k_feature_idx_ : array-like, shape = [n_predictions] Feature indices of the selected feature subsets. k_feature_names_ : array-like, shape = [n_predictions] Feature names of the selected feature subsets. If pandas DataFrames are used in the fit method, the feature names correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. New in v 0.13.0. k_score_ : float Cross-validation average score of the selected subset. subsets_ : dict A dictionary of selected feature subsets during the sequential selection, where the dictionary keys are the lengths k of these feature subsets. The dictionary values are dictionaries themselves with the following keys: 'feature_idx' (tuple of indices of the feature subset) 'feature_names' (tuple of feature names of the feat. subset) 'cv_scores' (list of individual cross-validation scores) 'avg_score' (average cross-validation score) Note that if pandas DataFrames are used in the fit method, the 'feature_names' correspond to the column names. Otherwise, the feature names are string representations of the feature array indices. The 'feature_names' is new in v 0.13.0. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/feature_selection/SequentialFeatureSelector/","title":"API"},{"location":"user_guide/feature_selection/SequentialFeatureSelector/#methods","text":"fit(X, y, custom_feature_names=None, **fit_params) Perform feature selection and learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X.
y : array-like, shape = [n_samples] Target values. New in v 0.13.0: pandas DataFrames are now also accepted as argument for y. custom_feature_names : None or tuple (default: None) Custom feature names for self.k_feature_names_ and self.subsets_[i]['feature_names'] . (new in v 0.13.0) fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns self : object fit_transform(X, y, **fit_params) Fit to training data then reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. y : array-like, shape = [n_samples] Target values. New in v 0.13.0: a pandas Series is now also accepted as argument for y. fit_params : dict of string -> object, optional Parameters to pass to the fit method of the classifier. Returns Reduced feature subset of X, shape={n_samples, k_features} get_metric_dict(confidence_interval=0.95) Return metric dictionary Parameters confidence_interval : float (default: 0.95) A positive float between 0.0 and 1.0 to compute the confidence interval bounds of the CV score averages. Returns Dictionary with items where each dictionary value is a list with the number of iterations (number of feature subsets) as its length. The dictionary keys corresponding to these lists are as follows: 'feature_idx': tuple of the indices of the feature subset 'cv_scores': list with individual CV scores 'avg_score': list of CV average scores 'std_dev': standard deviation of the CV score average 'std_err': standard error of the CV score average 'ci_bound': confidence interval bound of the CV score average get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X) Reduce X to its most important features. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. New in v 0.13.0: pandas DataFrames are now also accepted as argument for X. Returns Reduced feature subset of X, shape={n_samples, k_features}","title":"Methods"},{"location":"user_guide/file_io/find_filegroups/","text":"Find Filegroups A function that finds files that belong together (i.e., differ only by file extension) in different directories and collects them in a Python dictionary for further processing tasks. from mlxtend.file_io import find_filegroups Overview This function finds files that are related to each other based on their file names. This can be useful for parsing collections of files that have been stored in different subdirectories, for example: input_dir/ task01.txt task02.txt ... log_dir/ task01.log task02.log ... output_dir/ task01.dat task02.dat ...
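As a quick, illustrative sketch of the downstream processing this enables, the dictionary returned by find_filegroups can be iterated over to handle each group of related files together; the directory names below refer to the hypothetical layout above:

from mlxtend.file_io import find_filegroups

groups = find_filegroups(paths=['input_dir/', 'log_dir/', 'output_dir/'],
                         substring='task')
for task_name, file_paths in sorted(groups.items()):
    # e.g., task_name == 'task01' and file_paths lists the matching
    # .txt, .log, and .dat files from the three directories
    print(task_name, '->', file_paths)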
References - Example 1 - Grouping related files in a dictionary Given the following directory and file structure dir_1/ file_1.log file_2.log file_3.log dir_2/ file_1.csv file_2.csv file_3.csv dir_3/ file_1.txt file_2.txt file_3.txt we can use find_filegroups to group related files as items of a dictionary as shown below: from mlxtend.file_io import find_filegroups find_filegroups(paths=['./data_find_filegroups/dir_1', './data_find_filegroups/dir_2', './data_find_filegroups/dir_3'], substring='file_') {'file_1': ['./data_find_filegroups/dir_1/file_1.log', './data_find_filegroups/dir_2/file_1.csv', './data_find_filegroups/dir_3/file_1.txt'], 'file_2': ['./data_find_filegroups/dir_1/file_2.log', './data_find_filegroups/dir_2/file_2.csv', './data_find_filegroups/dir_3/file_2.txt'], 'file_3': ['./data_find_filegroups/dir_1/file_3.log', './data_find_filegroups/dir_2/file_3.csv', './data_find_filegroups/dir_3/file_3.txt']} API find_filegroups(paths, substring='', extensions=None, validity_check=True, ignore_invisible=True, rstrip='', ignore_substring=None) Find and collect files from different directories in a Python dictionary. Parameters paths : list Paths of the directories to be searched. Dictionary keys are built from the first directory. substring : str (default: '') Substring that all files have to contain to be considered. extensions : list (default: None) None or list of allowed file extensions for each path. If provided, the number of extensions must match the number of paths . validity_check : bool (default: True) If True , checks if all dictionary values have the same number of file paths. Prints a warning and returns an empty dictionary if the validity check failed. ignore_invisible : bool (default: True) If True , ignores invisible files (i.e., files starting with a period). rstrip : str (default: '') If provided, strips characters from the right side of the file base names after splitting the extension. Useful to trim different filenames to a common stem. E.g., \"abc_d.txt\" and \"abc_d_.csv\" would share the stem \"abc_d\" if rstrip is set to \"_\". ignore_substring : str (default: None) Ignores files that contain the specified substring. Returns groups : dict Dictionary of file paths. Keys are the file names found in the first directory listed in paths (without file extension). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_filegroups/","title":"Find Filegroups"},{"location":"user_guide/file_io/find_filegroups/#find-filegroups","text":"A function that finds files that belong together (i.e., differ only by file extension) in different directories and collects them in a Python dictionary for further processing tasks. from mlxtend.file_io import find_filegroups","title":"Find Filegroups"},{"location":"user_guide/file_io/find_filegroups/#overview","text":"This function finds files that are related to each other based on their file names. This can be useful for parsing collections of files that have been stored in different subdirectories, for example: input_dir/ task01.txt task02.txt ... log_dir/ task01.log task02.log ...
output_dir/ task01.dat task02.dat ...","title":"Overview"},{"location":"user_guide/file_io/find_filegroups/#references","text":"-","title":"References"},{"location":"user_guide/file_io/find_filegroups/#example-1-grouping-related-files-in-a-dictionary","text":"Given the following directory and file structure dir_1/ file_1.log file_2.log file_3.log dir_2/ file_1.csv file_2.csv file_3.csv dir_3/ file_1.txt file_2.txt file_3.txt we can use find_filegroups to group related files as items of a dictionary as shown below: from mlxtend.file_io import find_filegroups find_filegroups(paths=['./data_find_filegroups/dir_1', './data_find_filegroups/dir_2', './data_find_filegroups/dir_3'], substring='file_') {'file_1': ['./data_find_filegroups/dir_1/file_1.log', './data_find_filegroups/dir_2/file_1.csv', './data_find_filegroups/dir_3/file_1.txt'], 'file_2': ['./data_find_filegroups/dir_1/file_2.log', './data_find_filegroups/dir_2/file_2.csv', './data_find_filegroups/dir_3/file_2.txt'], 'file_3': ['./data_find_filegroups/dir_1/file_3.log', './data_find_filegroups/dir_2/file_3.csv', './data_find_filegroups/dir_3/file_3.txt']}","title":"Example 1 - Grouping related files in a dictionary"},{"location":"user_guide/file_io/find_filegroups/#api","text":"find_filegroups(paths, substring='', extensions=None, validity_check=True, ignore_invisible=True, rstrip='', ignore_substring=None) Find and collect files from different directories in a Python dictionary. Parameters paths : list Paths of the directories to be searched. Dictionary keys are built from the first directory. substring : str (default: '') Substring that all files have to contain to be considered. extensions : list (default: None) None or list of allowed file extensions for each path. If provided, the number of extensions must match the number of paths . validity_check : bool (default: True) If True , checks if all dictionary values have the same number of file paths. Prints a warning and returns an empty dictionary if the validity check failed. ignore_invisible : bool (default: True) If True , ignores invisible files (i.e., files starting with a period). rstrip : str (default: '') If provided, strips characters from the right side of the file base names after splitting the extension. Useful to trim different filenames to a common stem. E.g., \"abc_d.txt\" and \"abc_d_.csv\" would share the stem \"abc_d\" if rstrip is set to \"_\". ignore_substring : str (default: None) Ignores files that contain the specified substring. Returns groups : dict Dictionary of file paths. Keys are the file names found in the first directory listed in paths (without file extension). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_filegroups/","title":"API"},{"location":"user_guide/file_io/find_files/","text":"Find Files A function that finds files in a given directory based on substring matches and returns a list of the file names found. from mlxtend.file_io import find_files Overview This function finds files based on substring search. This is especially useful if we want to find specific files in a directory tree and return their absolute paths for further processing in Python.
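For instance, using the check_ext and ignore_substring parameters documented in the API section below, the search can be narrowed to a single file extension while skipping unwanted matches; the directory and substring values here are only illustrative:

from mlxtend.file_io import find_files

# return only .csv files whose names contain 'file_',
# skipping anything that contains 'backup'
find_files(substring='file_',
           path='./data_find_filegroups/',
           recursive=True,
           check_ext='.csv',
           ignore_substring='backup')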
References - Example 1 - Grouping related files in a dictionary Given the following directory and file structure dir_1/ file_1.log file_2.log file_3.log dir_2/ file_1.csv file_2.csv file_3.csv dir_3/ file_1.txt file_2.txt file_3.txt we can use find_files to return the paths to all files that contain the substring _2 as follows: from mlxtend.file_io import find_files find_files(substring='_2', path='./data_find_filegroups/', recursive=True) ['./data_find_filegroups/dir_1/file_2.log', './data_find_filegroups/dir_2/file_2.csv', './data_find_filegroups/dir_3/file_2.txt'] API find_files(substring, path, recursive=False, check_ext=None, ignore_invisible=True, ignore_substring=None) Find files in a directory based on substring matching. Parameters substring : str Substring of the file to be matched. path : str Path where to look. recursive : bool If true, searches subdirectories recursively. check_ext : str If string (e.g., '.txt'), only returns files that match the specified file extension. ignore_invisible : bool If True , ignores invisible files (i.e., files starting with a period). ignore_substring : str Ignores files that contain the specified substring. Returns results : list List of the matched files. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_files/","title":"Find Files"},{"location":"user_guide/file_io/find_files/#find-files","text":"A function that finds files in a given directory based on substring matches and returns a list of the file names found. from mlxtend.file_io import find_files","title":"Find Files"},{"location":"user_guide/file_io/find_files/#overview","text":"This function finds files based on substring search. This is especially useful if we want to find specific files in a directory tree and return their absolute paths for further processing in Python.","title":"Overview"},{"location":"user_guide/file_io/find_files/#references","text":"-","title":"References"},{"location":"user_guide/file_io/find_files/#example-1-grouping-related-files-in-a-dictionary","text":"Given the following directory and file structure dir_1/ file_1.log file_2.log file_3.log dir_2/ file_1.csv file_2.csv file_3.csv dir_3/ file_1.txt file_2.txt file_3.txt we can use find_files to return the paths to all files that contain the substring _2 as follows: from mlxtend.file_io import find_files find_files(substring='_2', path='./data_find_filegroups/', recursive=True) ['./data_find_filegroups/dir_1/file_2.log', './data_find_filegroups/dir_2/file_2.csv', './data_find_filegroups/dir_3/file_2.txt']","title":"Example 1 - Grouping related files in a dictionary"},{"location":"user_guide/file_io/find_files/#api","text":"find_files(substring, path, recursive=False, check_ext=None, ignore_invisible=True, ignore_substring=None) Find files in a directory based on substring matching. Parameters substring : str Substring of the file to be matched. path : str Path where to look. recursive : bool If true, searches subdirectories recursively. check_ext : str If string (e.g., '.txt'), only returns files that match the specified file extension. ignore_invisible : bool If True , ignores invisible files (i.e., files starting with a period). ignore_substring : str Ignores files that contain the specified substring. Returns results : list List of the matched files. 
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/file_io/find_files/","title":"API"},{"location":"user_guide/frequent_patterns/apriori/","text":"Frequent Itemsets via Apriori Algorithm Apriori function to extract frequent itemsets for association rule mining from mlxtend.frequent_patterns import apriori Overview Apriori is a popular algorithm [1] for extracting frequent itemsets with applications in association rule learning. The apriori algorithm has been designed to operate on databases containing transactions, such as purchases by customers of a store. An itemset is considered \"frequent\" if it meets a user-specified support threshold. For instance, if the support threshold is set to 0.5 (50%), a frequent itemset is defined as a set of items that occur together in at least 50% of all transactions in the database. References [1] Agrawal, Rakesh, and Ramakrishnan Srikant. \" Fast algorithms for mining association rules .\" Proc. 20th int. conf. very large data bases, VLDB. Vol. 1215. 1994. Example 1 -- Generating Frequent Itemsets The apriori function expects data in a one-hot encoded pandas DataFrame. Suppose we have the following transaction data: dataset = [['Milk', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'], ['Dill', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'], ['Milk', 'Apple', 'Kidney Beans', 'Eggs'], ['Milk', 'Unicorn', 'Corn', 'Kidney Beans', 'Yogurt'], ['Corn', 'Onion', 'Onion', 'Kidney Beans', 'Ice cream', 'Eggs']] We can transform it into the right format via the TransactionEncoder as follows: import pandas as pd from mlxtend.preprocessing import TransactionEncoder te = TransactionEncoder() te_ary = te.fit(dataset).transform(dataset) df = pd.DataFrame(te_ary, columns=te.columns_) df Apple Corn Dill Eggs Ice cream Kidney Beans Milk Nutmeg Onion Unicorn Yogurt 0 False False False True False True True True True False True 1 False False True True False True False True True False True 2 True False False True False True True False False False False 3 False True False False False True True False False True True 4 False True False True True True False False True False False Now, let us return the items and itemsets with at least 60% support: from mlxtend.frequent_patterns import apriori apriori(df, min_support=0.6) support itemsets 0 0.8 (3) 1 1.0 (5) 2 0.6 (6) 3 0.6 (8) 4 0.6 (10) 5 0.8 (3, 5) 6 0.6 (8, 3) 7 0.6 (5, 6) 8 0.6 (8, 5) 9 0.6 (10, 5) 10 0.6 (8, 3, 5) By default, apriori returns the column indices of the items, which may be useful in downstream operations such as association rule mining.
For better readability, we can set use_colnames=True to convert these integer values into the respective item names: apriori(df, min_support=0.6, use_colnames=True) support itemsets 0 0.8 (Eggs) 1 1.0 (Kidney Beans) 2 0.6 (Milk) 3 0.6 (Onion) 4 0.6 (Yogurt) 5 0.8 (Eggs, Kidney Beans) 6 0.6 (Onion, Eggs) 7 0.6 (Milk, Kidney Beans) 8 0.6 (Onion, Kidney Beans) 9 0.6 (Kidney Beans, Yogurt) 10 0.6 (Onion, Eggs, Kidney Beans) Example 2 -- Selecting and Filtering Results The advantage of working with pandas DataFrames is that we can use its convenient features to filter the results. For instance, let's assume we are only interested in itemsets of length 2 that have a support of at least 80 percent. First, we create the frequent itemsets via apriori and add a new column that stores the length of each itemset: frequent_itemsets = apriori(df, min_support=0.6, use_colnames=True) frequent_itemsets['length'] = frequent_itemsets['itemsets'].apply(lambda x: len(x)) frequent_itemsets support itemsets length 0 0.8 (Eggs) 1 1 1.0 (Kidney Beans) 1 2 0.6 (Milk) 1 3 0.6 (Onion) 1 4 0.6 (Yogurt) 1 5 0.8 (Eggs, Kidney Beans) 2 6 0.6 (Onion, Eggs) 2 7 0.6 (Milk, Kidney Beans) 2 8 0.6 (Onion, Kidney Beans) 2 9 0.6 (Kidney Beans, Yogurt) 2 10 0.6 (Onion, Eggs, Kidney Beans) 3 Then, we can select the results that satisfy our desired criteria as follows: frequent_itemsets[ (frequent_itemsets['length'] == 2) & (frequent_itemsets['support'] >= 0.8) ] support itemsets length 5 0.8 (Eggs, Kidney Beans) 2 Similarly, using the Pandas API, we can select entries based on the \"itemsets\" column: frequent_itemsets[ frequent_itemsets['itemsets'] == {'Onion', 'Eggs'} ] support itemsets length 6 0.6 (Onion, Eggs) 2 Frozensets Note that the entries in the \"itemsets\" column are of type frozenset , which is a built-in Python type that is similar to a Python set but immutable, which makes it more efficient for certain query or comparison operations (https://docs.python.org/3.6/library/stdtypes.html#frozenset). Since frozenset s are sets, the item order does not matter. I.e., the query frequent_itemsets[ frequent_itemsets['itemsets'] == {'Onion', 'Eggs'} ] is equivalent to any of the following three frequent_itemsets[ frequent_itemsets['itemsets'] == {'Eggs', 'Onion'} ] frequent_itemsets[ frequent_itemsets['itemsets'] == frozenset(('Eggs', 'Onion')) ] frequent_itemsets[ frequent_itemsets['itemsets'] == frozenset(('Onion', 'Eggs')) ] Example 3 -- Working with Sparse Representations To save memory, you may want to represent your transaction data in the sparse format. This is especially useful if you have lots of products and small transactions.
oht_ary = te.fit(dataset).transform(dataset, sparse=True) sparse_df = pd.SparseDataFrame(oht_ary, columns=te.columns_, default_fill_value=False) sparse_df Apple Corn Dill Eggs Ice cream Kidney Beans Milk Nutmeg Onion Unicorn Yogurt 0 False False False True False True True True True False True 1 False False True True False True False True True False True 2 True False False True False True True False False False False 3 False True False False False True True False False True True 4 False True False True True True False False True False False apriori(sparse_df, min_support=0.6, use_colnames=True) support itemsets 0 0.8 (Eggs) 1 1.0 (Kidney Beans) 2 0.6 (Milk) 3 0.6 (Onion) 4 0.6 (Yogurt) 5 0.8 (Eggs, Kidney Beans) 6 0.6 (Onion, Eggs) 7 0.6 (Milk, Kidney Beans) 8 0.6 (Onion, Kidney Beans) 9 0.6 (Kidney Beans, Yogurt) 10 0.6 (Onion, Eggs, Kidney Beans) API apriori(df, min_support=0.5, use_colnames=False, max_len=None, n_jobs=1) Get frequent itemsets from a one-hot DataFrame Parameters df : pandas DataFrame or pandas SparseDataFrame pandas DataFrame in the encoded format. The allowed values are either 0/1 or True/False. For example, Apple Bananas Beer Chicken Milk Rice 0 1 0 1 1 0 1 1 1 0 1 0 0 1 2 1 0 1 0 0 0 3 1 1 0 0 0 0 4 0 0 1 1 1 1 5 0 0 1 0 1 1 6 0 0 1 0 1 0 7 1 1 0 0 0 0 min_support : float (default: 0.5) A float between 0 and 1 for the minimum support of the itemsets returned. The support is computed as the fraction transactions_where_item(s)_occur / total_transactions. use_colnames : bool (default: False) If true, uses the DataFrames' column names in the returned DataFrame instead of column indices. max_len : int (default: None) Maximum length of the itemsets generated. If None (default) all possible itemset lengths (under the apriori condition) are evaluated. Returns pandas DataFrame with columns ['support', 'itemsets'] of all itemsets that are >= min_support and < max_len (if max_len is not None). Each itemset in the 'itemsets' column is of type frozenset , which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/apriori/","title":"Apriori"},{"location":"user_guide/frequent_patterns/apriori/#frequent-itemsets-via-apriori-algorithm","text":"Apriori function to extract frequent itemsets for association rule mining from mlxtend.frequent_patterns import apriori","title":"Frequent Itemsets via Apriori Algorithm"},{"location":"user_guide/frequent_patterns/apriori/#overview","text":"Apriori is a popular algorithm [1] for extracting frequent itemsets with applications in association rule learning. The apriori algorithm has been designed to operate on databases containing transactions, such as purchases by customers of a store. An itemset is considered \"frequent\" if it meets a user-specified support threshold.
For instance, if the support threshold is set to 0.5 (50%), a frequent itemset is defined as a set of items that occur together in at least 50% of all transactions in the database.","title":"Overview"},{"location":"user_guide/frequent_patterns/apriori/#references","text":"[1] Agrawal, Rakesh, and Ramakrishnan Srikant. \" Fast algorithms for mining association rules .\" Proc. 20th int. conf. very large data bases, VLDB. Vol. 1215. 1994.","title":"References"},{"location":"user_guide/frequent_patterns/apriori/#example-1-generating-frequent-itemsets","text":"The apriori function expects data in a one-hot encoded pandas DataFrame. Suppose we have the following transaction data: dataset = [['Milk', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'], ['Dill', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'], ['Milk', 'Apple', 'Kidney Beans', 'Eggs'], ['Milk', 'Unicorn', 'Corn', 'Kidney Beans', 'Yogurt'], ['Corn', 'Onion', 'Onion', 'Kidney Beans', 'Ice cream', 'Eggs']] We can transform it into the right format via the TransactionEncoder as follows: import pandas as pd from mlxtend.preprocessing import TransactionEncoder te = TransactionEncoder() te_ary = te.fit(dataset).transform(dataset) df = pd.DataFrame(te_ary, columns=te.columns_) df Apple Corn Dill Eggs Ice cream Kidney Beans Milk Nutmeg Onion Unicorn Yogurt 0 False False False True False True True True True False True 1 False False True True False True False True True False True 2 True False False True False True True False False False False 3 False True False False False True True False False True True 4 False True False True True True False False True False False Now, let us return the items and itemsets with at least 60% support: from mlxtend.frequent_patterns import apriori apriori(df, min_support=0.6) support itemsets 0 0.8 (3) 1 1.0 (5) 2 0.6 (6) 3 0.6 (8) 4 0.6 (10) 5 0.8 (3, 5) 6 0.6 (8, 3) 7 0.6 (5, 6) 8 0.6 (8, 5) 9 0.6 (10, 5) 10 0.6 (8, 3, 5) By default, apriori returns the column indices of the items, which may be useful in downstream operations such as association rule mining. For better readability, we can set use_colnames=True to convert these integer values into the respective item names: apriori(df, min_support=0.6, use_colnames=True) support itemsets 0 0.8 (Eggs) 1 1.0 (Kidney Beans) 2 0.6 (Milk) 3 0.6 (Onion) 4 0.6 (Yogurt) 5 0.8 (Eggs, Kidney Beans) 6 0.6 (Onion, Eggs) 7 0.6 (Milk, Kidney Beans) 8 0.6 (Onion, Kidney Beans) 9 0.6 (Kidney Beans, Yogurt) 10 0.6 (Onion, Eggs, Kidney Beans)","title":"Example 1 -- Generating Frequent Itemsets"},{"location":"user_guide/frequent_patterns/apriori/#example-2-selecting-and-filtering-results","text":"The advantage of working with pandas DataFrames is that we can use its convenient features to filter the results. For instance, let's assume we are only interested in itemsets of length 2 that have a support of at least 80 percent.
First, we create the frequent itemsets via apriori and add a new column that stores the length of each itemset: frequent_itemsets = apriori(df, min_support=0.6, use_colnames=True) frequent_itemsets['length'] = frequent_itemsets['itemsets'].apply(lambda x: len(x)) frequent_itemsets support itemsets length 0 0.8 (Eggs) 1 1 1.0 (Kidney Beans) 1 2 0.6 (Milk) 1 3 0.6 (Onion) 1 4 0.6 (Yogurt) 1 5 0.8 (Eggs, Kidney Beans) 2 6 0.6 (Onion, Eggs) 2 7 0.6 (Milk, Kidney Beans) 2 8 0.6 (Onion, Kidney Beans) 2 9 0.6 (Kidney Beans, Yogurt) 2 10 0.6 (Onion, Eggs, Kidney Beans) 3 Then, we can select the results that satisfy our desired criteria as follows: frequent_itemsets[ (frequent_itemsets['length'] == 2) & (frequent_itemsets['support'] >= 0.8) ] support itemsets length 5 0.8 (Eggs, Kidney Beans) 2 Similarly, using the Pandas API, we can select entries based on the \"itemsets\" column: frequent_itemsets[ frequent_itemsets['itemsets'] == {'Onion', 'Eggs'} ] support itemsets length 6 0.6 (Onion, Eggs) 2 Frozensets Note that the entries in the \"itemsets\" column are of type frozenset , which is a built-in Python type that is similar to a Python set but immutable, which makes it more efficient for certain query or comparison operations (https://docs.python.org/3.6/library/stdtypes.html#frozenset). Since frozenset s are sets, the item order does not matter. I.e., the query frequent_itemsets[ frequent_itemsets['itemsets'] == {'Onion', 'Eggs'} ] is equivalent to any of the following three frequent_itemsets[ frequent_itemsets['itemsets'] == {'Eggs', 'Onion'} ] frequent_itemsets[ frequent_itemsets['itemsets'] == frozenset(('Eggs', 'Onion')) ] frequent_itemsets[ frequent_itemsets['itemsets'] == frozenset(('Onion', 'Eggs')) ]","title":"Example 2 -- Selecting and Filtering Results"},{"location":"user_guide/frequent_patterns/apriori/#example-3-working-with-sparse-representations","text":"To save memory, you may want to represent your transaction data in the sparse format. This is especially useful if you have lots of products and small transactions.
oht_ary = te.fit(dataset).transform(dataset, sparse=True) sparse_df = pd.SparseDataFrame(oht_ary, columns=te.columns_, default_fill_value=False) sparse_df Apple Corn Dill Eggs Ice cream Kidney Beans Milk Nutmeg Onion Unicorn Yogurt 0 False False False True False True True True True False True 1 False False True True False True False True True False True 2 True False False True False True True False False False False 3 False True False False False True True False False True True 4 False True False True True True False False True False False apriori(sparse_df, min_support=0.6, use_colnames=True) support itemsets 0 0.8 (Eggs) 1 1.0 (Kidney Beans) 2 0.6 (Milk) 3 0.6 (Onion) 4 0.6 (Yogurt) 5 0.8 (Eggs, Kidney Beans) 6 0.6 (Onion, Eggs) 7 0.6 (Milk, Kidney Beans) 8 0.6 (Onion, Kidney Beans) 9 0.6 (Kidney Beans, Yogurt) 10 0.6 (Onion, Eggs, Kidney Beans)","title":"Example 3 -- Working with Sparse Representations"},{"location":"user_guide/frequent_patterns/apriori/#api","text":"apriori(df, min_support=0.5, use_colnames=False, max_len=None, n_jobs=1) Get frequent itemsets from a one-hot DataFrame Parameters df : pandas DataFrame or pandas SparseDataFrame pandas DataFrame in the encoded format. The allowed values are either 0/1 or True/False. For example, Apple Bananas Beer Chicken Milk Rice 0 1 0 1 1 0 1 1 1 0 1 0 0 1 2 1 0 1 0 0 0 3 1 1 0 0 0 0 4 0 0 1 1 1 1 5 0 0 1 0 1 1 6 0 0 1 0 1 0 7 1 1 0 0 0 0 min_support : float (default: 0.5) A float between 0 and 1 for the minimum support of the itemsets returned. The support is computed as the fraction transactions_where_item(s)_occur / total_transactions. use_colnames : bool (default: False) If true, uses the DataFrames' column names in the returned DataFrame instead of column indices. max_len : int (default: None) Maximum length of the itemsets generated. If None (default) all possible itemset lengths (under the apriori condition) are evaluated. Returns pandas DataFrame with columns ['support', 'itemsets'] of all itemsets that are >= min_support and < max_len (if max_len is not None). Each itemset in the 'itemsets' column is of type frozenset , which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/apriori/","title":"API"},{"location":"user_guide/frequent_patterns/association_rules/","text":"Association Rules Generation from Frequent Itemsets Function to generate association rules from frequent itemsets from mlxtend.frequent_patterns import association_rules Overview Rule generation is a common task in the mining of frequent patterns. An association rule is an implication expression of the form X \\rightarrow Y , where X and Y are disjoint itemsets [1]. A more concrete example based on consumer behaviour would be \\{Diapers\\} \\rightarrow \\{Beer\\} suggesting that people who buy diapers are also likely to buy beer. To evaluate the \"interest\" of such an association rule, different metrics have been developed. The current implementation makes use of the confidence and lift metrics.
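Before the formal definitions in the next section, a small numeric sketch may help; the support values below are hypothetical and chosen only for illustration:

# hypothetical supports for the {Diapers} -> {Beer} rule
support_diapers = 0.4   # fraction of transactions containing diapers
support_beer = 0.5      # fraction of transactions containing beer
support_both = 0.3      # fraction containing both diapers and beer

# confidence(A -> C) = support(A and C) / support(A)
confidence = support_both / support_diapers   # 0.75
# lift(A -> C) = confidence(A -> C) / support(C)
lift = confidence / support_beer              # 1.5; > 1 suggests a positive association
print(confidence, lift)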
Metrics The currently supported metrics for evaluating association rules and setting selection thresholds are listed below. Given a rule \"A -> C\", A stands for antecedent and C stands for consequent. 'support': \text{support}(A\rightarrow C) = \text{support}(A \cup C), \;\;\; \text{range: } [0, 1] introduced in [3] The support metric is defined for itemsets, not association rules. The table produced by the association rule mining algorithm contains three different support metrics: 'antecedent support', 'consequent support', and 'support'. Here, 'antecedent support' computes the proportion of transactions that contain the antecedent A, and 'consequent support' computes the support for the itemset of the consequent C. The 'support' metric then computes the support of the combined itemset A \cup C -- note that 'support' can never exceed min('antecedent support', 'consequent support'). Typically, support is used to measure the abundance or frequency (often interpreted as significance or importance) of an itemset in a database. We refer to an itemset as a \"frequent itemset\" if its support is larger than a specified minimum-support threshold. Note that in general, due to the downward closure property, all subsets of a frequent itemset are also frequent. 'confidence': \text{confidence}(A\rightarrow C) = \frac{\text{support}(A\rightarrow C)}{\text{support}(A)}, \;\;\; \text{range: } [0, 1] introduced in [3] The confidence of a rule A->C is the probability of seeing the consequent in a transaction given that it also contains the antecedent. Note that the metric is not symmetric, i.e., it is directed; for instance, the confidence for A->C is generally different from the confidence for C->A. The confidence is 1 (maximal) for a rule A->C if the consequent and antecedent always occur together. 'lift': \text{lift}(A\rightarrow C) = \frac{\text{confidence}(A\rightarrow C)}{\text{support}(C)}, \;\;\; \text{range: } [0, \infty] introduced in [4] The lift metric is commonly used to measure how much more often the antecedent and consequent of a rule A->C occur together than we would expect if they were statistically independent. If A and C are independent, the lift score will be exactly 1. 'leverage': \text{leverage}(A\rightarrow C) = \text{support}(A\rightarrow C) - \text{support}(A) \times \text{support}(C), \;\;\; \text{range: } [-1, 1] introduced in [5] Leverage computes the difference between the observed frequency of A and C appearing together and the frequency that would be expected if A and C were independent. A leverage value of 0 indicates independence. 'conviction': \text{conviction}(A\rightarrow C) = \frac{1 - \text{support}(C)}{1 - \text{confidence}(A\rightarrow C)}, \;\;\; \text{range: } [0, \infty] introduced in [6] A high conviction value means that the consequent is highly dependent on the antecedent. For instance, in the case of a perfect confidence score, the denominator becomes 0 (due to 1 - 1), for which the conviction score is defined as 'inf'. Similar to lift, if items are independent, the conviction is 1.
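To make these definitions concrete, here is a small worked example that plugs in the support values of the rule (Onion) -> (Eggs) from the examples further below (support(A) = 0.6, support(C) = 0.8, support(A U C) = 0.6); plain Python, independent of mlxtend:

```python
# Worked example for the rule (Onion) -> (Eggs) using the support
# values that appear in the example tables below:
s_a, s_c, s_ac = 0.6, 0.8, 0.6  # support(A), support(C), support(A U C)

confidence = s_ac / s_a                  # 0.6 / 0.6 = 1.0
lift = confidence / s_c                  # 1.0 / 0.8 = 1.25
leverage = s_ac - s_a * s_c              # 0.6 - 0.48 = 0.12
conviction = (1 - s_c) / (1 - confidence) if confidence < 1 else float('inf')

print(confidence, lift, leverage, conviction)  # -> 1.0 1.25 ~0.12 inf
```

These values match the confidence, lift, leverage, and conviction columns of the rule tables shown in the examples that follow.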
References [1] Tan, Steinbach, Kumar. Introduction to Data Mining. Pearson New International Edition. Harlow: Pearson Education Ltd., 2014. (pp. 327-414). [2] Michael Hahsler, http://michael.hahsler.net/research/association_rules/measures.html [3] R. Agrawal, T. Imielinski, and A. Swami. Mining associations between sets of items in large databases. In Proc. of the ACM SIGMOD Int'l Conference on Management of Data, pages 207-216, Washington D.C., May 1993. [4] S. Brin, R. Motwani, J. D. Ullman, and S. Tsur. Dynamic itemset counting and implication rules for market basket data. [5] Piatetsky-Shapiro, G., Discovery, analysis, and presentation of strong rules. Knowledge Discovery in Databases, 1991: p. 229-248. [6] Sergey Brin, Rajeev Motwani, Jeffrey D. Ullman, and Shalom Tsur. Dynamic itemset counting and implication rules for market basket data. In SIGMOD 1997, Proceedings ACM SIGMOD International Conference on Management of Data, pages 255-264, Tucson, Arizona, USA, May 1997. Example 1 -- Generating Association Rules from Frequent Itemsets The association_rules function takes DataFrames of frequent itemsets as produced by the apriori function in mlxtend.frequent_patterns . To demonstrate the usage of the association_rules function, we first create a pandas DataFrame of frequent itemsets as generated by the apriori function: import pandas as pd from mlxtend.preprocessing import TransactionEncoder from mlxtend.frequent_patterns import apriori dataset = [['Milk', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'], ['Dill', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'], ['Milk', 'Apple', 'Kidney Beans', 'Eggs'], ['Milk', 'Unicorn', 'Corn', 'Kidney Beans', 'Yogurt'], ['Corn', 'Onion', 'Onion', 'Kidney Beans', 'Ice cream', 'Eggs']] te = TransactionEncoder() te_ary = te.fit(dataset).transform(dataset) df = pd.DataFrame(te_ary, columns=te.columns_) frequent_itemsets = apriori(df, min_support=0.6, use_colnames=True) frequent_itemsets support itemsets 0 0.8 (Eggs) 1 1.0 (Kidney Beans) 2 0.6 (Milk) 3 0.6 (Onion) 4 0.6 (Yogurt) 5 0.8 (Kidney Beans, Eggs) 6 0.6 (Onion, Eggs) 7 0.6 (Milk, Kidney Beans) 8 0.6 (Onion, Kidney Beans) 9 0.6 (Kidney Beans, Yogurt) 10 0.6 (Onion, Kidney Beans, Eggs) The association_rules() function allows you to (1) specify your metric of interest and (2) set the corresponding threshold. Currently implemented measures are support, confidence, lift, leverage, and conviction.
Let's say you are interested in rules derived from the frequent itemsets only if the level of confidence is above the 70 percent threshold ( min_threshold=0.7 ): from mlxtend.frequent_patterns import association_rules association_rules(frequent_itemsets, metric=\"confidence\", min_threshold=0.7) antecedents consequents antecedent support consequent support support confidence lift leverage conviction 0 (Kidney Beans) (Eggs) 1.0 0.8 0.8 0.80 1.00 0.00 1.000000 1 (Eggs) (Kidney Beans) 0.8 1.0 0.8 1.00 1.00 0.00 inf 2 (Onion) (Eggs) 0.6 0.8 0.6 1.00 1.25 0.12 inf 3 (Eggs) (Onion) 0.8 0.6 0.6 0.75 1.25 0.12 1.600000 4 (Milk) (Kidney Beans) 0.6 1.0 0.6 1.00 1.00 0.00 inf 5 (Onion) (Kidney Beans) 0.6 1.0 0.6 1.00 1.00 0.00 inf 6 (Yogurt) (Kidney Beans) 0.6 1.0 0.6 1.00 1.00 0.00 inf 7 (Onion, Kidney Beans) (Eggs) 0.6 0.8 0.6 1.00 1.25 0.12 inf 8 (Onion, Eggs) (Kidney Beans) 0.6 1.0 0.6 1.00 1.00 0.00 inf 9 (Kidney Beans, Eggs) (Onion) 0.8 0.6 0.6 0.75 1.25 0.12 1.600000 10 (Onion) (Kidney Beans, Eggs) 0.6 0.8 0.6 1.00 1.25 0.12 inf 11 (Eggs) (Onion, Kidney Beans) 0.8 0.6 0.6 0.75 1.25 0.12 1.600000 Example 2 -- Rule Generation and Selection Criteria If you are interested in rules according to a different metric of interest, you can simply adjust the metric and min_threshold arguments. E.g., if you are only interested in rules that have a lift score of >= 1.2, you would do the following: rules = association_rules(frequent_itemsets, metric=\"lift\", min_threshold=1.2) rules antecedents consequents antecedent support consequent support support confidence lift leverage conviction 0 (Onion) (Eggs) 0.6 0.8 0.6 1.00 1.25 0.12 inf 1 (Eggs) (Onion) 0.8 0.6 0.6 0.75 1.25 0.12 1.600000 2 (Onion, Kidney Beans) (Eggs) 0.6 0.8 0.6 1.00 1.25 0.12 inf 3 (Kidney Beans, Eggs) (Onion) 0.8 0.6 0.6 0.75 1.25 0.12 1.600000 4 (Onion) (Kidney Beans, Eggs) 0.6 0.8 0.6 1.00 1.25 0.12 inf 5 (Eggs) (Onion, Kidney Beans) 0.8 0.6 0.6 0.75 1.25 0.12 1.600000 Pandas DataFrames make it easy to filter the results further.
Let's say we are only interested in rules that satisfy the following criteria: at least 2 antecedents a confidence > 0.75 a lift score > 1.2 We could compute the antecedent length as follows: rules[\"antecedent_len\"] = rules[\"antecedents\"].apply(lambda x: len(x)) rules antecedents consequents antecedent support consequent support support confidence lift leverage conviction antecedent_len 0 (Onion) (Eggs) 0.6 0.8 0.6 1.00 1.25 0.12 inf 1 1 (Eggs) (Onion) 0.8 0.6 0.6 0.75 1.25 0.12 1.600000 1 2 (Onion, Kidney Beans) (Eggs) 0.6 0.8 0.6 1.00 1.25 0.12 inf 2 3 (Kidney Beans, Eggs) (Onion) 0.8 0.6 0.6 0.75 1.25 0.12 1.600000 2 4 (Onion) (Kidney Beans, Eggs) 0.6 0.8 0.6 1.00 1.25 0.12 inf 1 5 (Eggs) (Onion, Kidney Beans) 0.8 0.6 0.6 0.75 1.25 0.12 1.600000 1 Then, we can use pandas' selection syntax as shown below: rules[ (rules['antecedent_len'] >= 2) & (rules['confidence'] > 0.75) & (rules['lift'] > 1.2) ] antecedents consequents antecedent support consequent support support confidence lift leverage conviction antecedent_len 2 (Onion, Kidney Beans) (Eggs) 0.6 0.8 0.6 1.0 1.25 0.12 inf 2 Similarly, using the Pandas API, we can select entries based on the \"antecedents\" or \"consequents\" columns: rules[rules['antecedents'] == {'Eggs', 'Kidney Beans'}] antecedents consequents antecedent support consequent support support confidence lift leverage conviction antecedent_len 3 (Kidney Beans, Eggs) (Onion) 0.8 0.6 0.6 0.75 1.25 0.12 1.6 2 Frozensets Note that the entries in the \"antecedents\" and \"consequents\" columns are of type frozenset , a built-in Python type that behaves like a Python set but is immutable, which makes it more efficient for certain query or comparison operations (https://docs.python.org/3.6/library/stdtypes.html#frozenset). Since frozenset s are sets, the item order does not matter. I.e., the query rules[rules['antecedents'] == {'Eggs', 'Kidney Beans'}] is equivalent to any of the following three: rules[rules['antecedents'] == {'Kidney Beans', 'Eggs'}] rules[rules['antecedents'] == frozenset(('Eggs', 'Kidney Beans'))] rules[rules['antecedents'] == frozenset(('Kidney Beans', 'Eggs'))] Example 3 -- Frequent Itemsets with Incomplete Antecedent and Consequent Information Most metrics computed by association_rules depend on the consequent and antecedent support scores of a given rule provided in the frequent itemset input DataFrame.
Consider the following example: import pandas as pd itemset_dict = {'itemsets': [['177', '176'], ['177', '179'], ['176', '178'], ['176', '179'], ['93', '100'], ['177', '178'], ['177', '176', '178']], 'support':[0.253623, 0.253623, 0.217391, 0.217391, 0.181159, 0.108696, 0.108696]} freq_itemsets = pd.DataFrame(itemset_dict) freq_itemsets itemsets support 0 [177, 176] 0.253623 1 [177, 179] 0.253623 2 [176, 178] 0.217391 3 [176, 179] 0.217391 4 [93, 100] 0.181159 5 [177, 178] 0.108696 6 [177, 176, 178] 0.108696 Note that this is a \"cropped\" DataFrame that doesn't contain the support values of the item subsets. This can create problems if we want to compute the association rule metrics for, e.g., 176 => 177 . For example, the confidence is computed as \text{confidence}(A\rightarrow C) = \frac{\text{support}(A\rightarrow C)}{\text{support}(A)}, \;\;\; \text{range: } [0, 1] But we do not have \text{support}(A) . All we know about \"A\"'s support is that it is at least 0.253623. In these scenarios, where not all metrics can be computed due to an incomplete input DataFrame, you can use the support_only=True option, which computes only the support column of a given rule, a metric that does not require this additional information: \text{support}(A\rightarrow C) = \text{support}(A \cup C), \;\;\; \text{range: } [0, 1] NaNs will be assigned to all other metric columns: from mlxtend.frequent_patterns import association_rules res = association_rules(freq_itemsets, support_only=True, min_threshold=0.1) res antecedents consequents antecedent support consequent support support confidence lift leverage conviction 0 (176) (177) NaN NaN 0.253623 NaN NaN NaN NaN 1 (177) (176) NaN NaN 0.253623 NaN NaN NaN NaN 2 (179) (177) NaN NaN 0.253623 NaN NaN NaN NaN 3 (177) (179) NaN NaN 0.253623 NaN NaN NaN NaN 4 (176) (178) NaN NaN 0.217391 NaN NaN NaN NaN 5 (178) (176) NaN NaN 0.217391 NaN NaN NaN NaN 6 (179) (176) NaN NaN 0.217391 NaN NaN NaN NaN 7 (176) (179) NaN NaN 0.217391 NaN NaN NaN NaN 8 (93) (100) NaN NaN 0.181159 NaN NaN NaN NaN 9 (100) (93) NaN NaN 0.181159 NaN NaN NaN NaN 10 (177) (178) NaN NaN 0.108696 NaN NaN NaN NaN 11 (178) (177) NaN NaN 0.108696 NaN NaN NaN NaN 12 (176, 177) (178) NaN NaN 0.108696 NaN NaN NaN NaN 13 (176, 178) (177) NaN NaN 0.108696 NaN NaN NaN NaN 14 (177, 178) (176) NaN NaN 0.108696 NaN NaN NaN NaN 15 (176) (177, 178) NaN NaN 0.108696 NaN NaN NaN NaN 16 (177) (176, 178) NaN NaN 0.108696 NaN NaN NaN NaN 17 (178) (176, 177) NaN NaN 0.108696 NaN NaN NaN NaN To clean up the representation, you may want to do the following: res = res[['antecedents', 'consequents', 'support']] res antecedents consequents support 0 (176) (177) 0.253623 1 (177) (176) 0.253623 2 (179) (177) 0.253623 3 (177) (179) 0.253623 4 (176) (178) 0.217391 5 (178) (176) 0.217391 6 (179) (176) 0.217391 7 (176) (179) 0.217391 8 (93) (100) 0.181159 9 (100) (93) 0.181159 10 (177) (178) 0.108696 11 (178) (177) 0.108696 12 (176, 177) (178) 0.108696 13 (176, 178) (177) 0.108696 14 (177, 178) (176) 0.108696 15 (176) (177, 178) 0.108696 16 (177) (176, 178) 0.108696 17 (178) (176, 177) 0.108696
API association_rules(df, metric='confidence', min_threshold=0.8, support_only=False) Generates a DataFrame of association rules including the metrics 'support', 'confidence', and 'lift' Parameters df : pandas DataFrame pandas DataFrame of frequent itemsets with columns ['support', 'itemsets'] metric : string (default: 'confidence') Metric to evaluate if a rule is of interest. Automatically set to 'support' if support_only=True . Otherwise, supported metrics are 'support', 'confidence', 'lift', 'leverage', and 'conviction'. These metrics are computed as follows: - support(A->C) = support(A+C) [aka 'support'], range: [0, 1] - confidence(A->C) = support(A+C) / support(A), range: [0, 1] - lift(A->C) = confidence(A->C) / support(C), range: [0, inf] - leverage(A->C) = support(A->C) - support(A)*support(C), range: [-1, 1] - conviction = [1 - support(C)] / [1 - confidence(A->C)], range: [0, inf] min_threshold : float (default: 0.8) Minimal threshold for the evaluation metric, via the metric parameter, to decide whether a candidate rule is of interest. support_only : bool (default: False) Only computes the rule support and fills the other metric columns with NaNs. This is useful if: a) the input DataFrame is incomplete, e.g., does not contain support values for all rule antecedents and consequents b) you simply want to speed up the computation because you don't need the other metrics. Returns pandas DataFrame with columns \"antecedents\" and \"consequents\" that store itemsets, plus the scoring metric columns: \"antecedent support\", \"consequent support\", \"support\", \"confidence\", \"lift\", \"leverage\", \"conviction\" of all rules for which metric(rule) >= min_threshold. Each entry in the \"antecedents\" and \"consequents\" columns is of type frozenset , which is a Python built-in type that behaves similarly to sets except that it is immutable (For more info, see https://docs.python.org/3.6/library/stdtypes.html#frozenset). Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/association_rules/","title":"Association rules"}
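As a quick illustration of the metric / min_threshold pairing described above, the call below is a hypothetical variation on the examples in this document (it assumes a frequent_itemsets DataFrame produced by apriori, as shown earlier) that keeps only rules with a conviction of at least 1.5:

```python
from mlxtend.frequent_patterns import association_rules

# any of 'support', 'confidence', 'lift', 'leverage', 'conviction'
# can serve as the selection metric
strong_rules = association_rules(frequent_itemsets,
                                 metric="conviction",
                                 min_threshold=1.5)
```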
,{"location":"user_guide/general_concepts/activation-functions/","text":"Activation Functions for Artificial Neural Networks","title":"Activation Functions for Artificial Neural Networks"},{"location":"user_guide/general_concepts/activation-functions/#activation-functions-for-artificial-neural-networks","text":"","title":"Activation Functions for Artificial Neural Networks"},{"location":"user_guide/general_concepts/gradient-optimization/","text":"Gradient Descent and Stochastic Gradient Descent Gradient Descent (GD) Optimization Using the Gradient Descent optimization algorithm, the weights are updated incrementally after each epoch (= pass over the training dataset). Compatible cost functions J(\cdot) Sum of squared errors (SSE) [ mlxtend.regressor.LinearRegression , mlxtend.classifier.Adaline ]: J(\mathbf{w}) = \frac{1}{2} \sum_i (\text{target}^{(i)} - \text{output}^{(i)})^2 Logistic Cost (cross-entropy) [ mlxtend.classifier.LogisticRegression ]: ... The magnitude and direction of the weight update is computed by taking a step in the opposite direction of the cost gradient \Delta w_j = -\eta \frac{\partial J}{\partial w_j}, where \eta is the learning rate.
The weights are then updated after each epoch via the following update rule: \mathbf{w} := \mathbf{w} + \Delta\mathbf{w}, where \Delta\mathbf{w} is a vector that contains the weight updates of each weight coefficient {w} , which are computed as follows: \Delta w_j = -\eta \frac{\partial J}{\partial w_j}\\ = -\eta \sum_i (\text{target}^{(i)} - \text{output}^{(i)})(-x_{j}^{(i)})\\ = \eta \sum_i (\text{target}^{(i)} - \text{output}^{(i)})x_{j}^{(i)}. Essentially, we can picture Gradient Descent optimization as a hiker (the weight coefficient) who wants to climb down a mountain (cost function) into a valley (cost minimum), and each step is determined by the steepness of the slope (gradient) and the leg length of the hiker (learning rate). Considering a cost function with only a single weight coefficient, we can illustrate this concept as follows: Stochastic Gradient Descent (SGD) In Gradient Descent optimization, we compute the cost gradient based on the complete training set; hence, we sometimes also call it batch gradient descent . In the case of very large datasets, using Gradient Descent can be quite costly since we are only taking a single step for one pass over the training set -- thus, the larger the training set, the slower our algorithm updates the weights and the longer it may take until it converges to the global cost minimum (note that the SSE cost function is convex). In Stochastic Gradient Descent (sometimes also referred to as iterative or on-line gradient descent), we don't accumulate the weight updates as we've seen above for Gradient Descent: for one or more epochs: for each weight j w_j := w_j + \Delta w_j , where: \Delta w_j= \eta \sum_i (\text{target}^{(i)} - \text{output}^{(i)})x_{j}^{(i)} Instead, we update the weights after each training sample: for one or more epochs, or until approx. cost minimum is reached: for training sample i : for each weight j w_j := w_j + \Delta w_j , where: \Delta w_j= \eta (\text{target}^{(i)} - \text{output}^{(i)})x_{j}^{(i)} Here, the term \"stochastic\" comes from the fact that the gradient based on a single training sample is a \"stochastic approximation\" of the \"true\" cost gradient. Due to its stochastic nature, the path towards the global cost minimum is not \"direct\" as in Gradient Descent, but may go \"zig-zag\" if we are visualizing the cost surface in a 2D space. However, it has been shown that Stochastic Gradient Descent almost surely converges to the global cost minimum if the cost function is convex (or pseudo-convex) [1]. Stochastic Gradient Descent Shuffling There are several different flavors of stochastic gradient descent, all of which can be found throughout the literature. Let's take a look at the three most common variants (a minimal code sketch of variant B follows below): A) randomly shuffle samples in the training set for one or more epochs, or until approx. cost minimum is reached for training sample i compute gradients and perform weight updates B) for one or more epochs, or until approx. cost minimum is reached randomly shuffle samples in the training set for training sample i compute gradients and perform weight updates C) for iterations t , or until approx. cost minimum is reached: draw random sample from the training set compute gradients and perform weight updates
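Here is a minimal sketch of scenario B (reshuffle before each epoch), assuming NumPy arrays X (with a leading bias column) and y; this is an illustration of the per-sample update rule above, not the mlxtend implementation:

```python
import numpy as np

def sgd_scenario_b(X, y, eta=0.01, n_epochs=10, seed=1):
    """Minimal SGD sketch (scenario B): reshuffle the training set
    at the start of every epoch, then update the weights once per
    training sample."""
    rng = np.random.RandomState(seed)
    w = np.zeros(X.shape[1])
    for _ in range(n_epochs):
        idx = rng.permutation(len(y))          # shuffle each epoch
        for i in idx:
            output = np.dot(X[i], w)           # identity activation
            w += eta * (y[i] - output) * X[i]  # per-sample update
    return w
```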
In scenario A [3], we shuffle the training set only one time in the beginning; whereas in scenario B, we shuffle the training set after each epoch to prevent repeating update cycles. In both scenario A and scenario B, each training sample is only used once per epoch to update the model weights. In scenario C, we draw the training samples randomly with replacement from the training set [2]. If the number of iterations t is equal to the number of training samples, we learn the model based on a bootstrap sample of the training set. Mini-Batch Gradient Descent (MB-GD) Mini-Batch Gradient Descent (MB-GD) is a compromise between batch GD and SGD. In MB-GD, we update the model based on smaller groups of training samples; instead of computing the gradient from 1 sample (SGD) or all n training samples (GD), we compute the gradient from 1 < k < n training samples (a common mini-batch size is k=50 ). MB-GD converges in fewer iterations than GD because we update the weights more frequently; however, MB-GD lets us utilize vectorized operations, which typically results in a computational performance gain over SGD. Learning Rates An adaptive learning rate \eta : Choosing a decrease constant d that shrinks the learning rate over time: \eta(t+1) := \eta(t) / (1 + t \times d) Momentum learning by adding a factor of the previous gradient to the weight update for faster updates: \Delta \mathbf{w}_{t+1} := \eta \nabla J(\mathbf{w}_{t+1}) + \alpha \Delta \mathbf{w}_{t} References [1] Bottou, L\u00e9on (1998). \"Online Algorithms and Stochastic Approximations\" . Online Learning and Neural Networks. Cambridge University Press. ISBN 978-0-521-65263-6 [2] Bottou, L\u00e9on. \"Large-scale machine learning with stochastic gradient descent.\" Proceedings of COMPSTAT'2010. Physica-Verlag HD, 2010. 177-186. [3] Bottou, L\u00e9on. \"Stochastic gradient descent tricks.\" Neural Networks: Tricks of the Trade. Springer Berlin Heidelberg, 2012. 421-436.","title":"Gradient Descent and Stochastic Gradient Descent"}
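Both learning-rate refinements can be written down in a few lines. The sketch below illustrates only the stated formulas; eta0, d, and alpha are assumed hyperparameter names, and the leading minus sign in the momentum update follows the \Delta \mathbf{w} = -\eta \nabla J(\mathbf{w}) convention used earlier on this page:

```python
def decayed_eta(eta0, t, d):
    # adaptive learning rate: eta(t+1) := eta(t) / (1 + t * d),
    # applied iteratively starting from the initial rate eta0
    eta = eta0
    for step in range(t):
        eta = eta / (1 + step * d)
    return eta

def momentum_update(grad, prev_delta, eta=0.01, alpha=0.9):
    # Delta w_{t+1} = -eta * grad(J) + alpha * Delta w_t;
    # a fraction alpha of the previous update is carried over
    return -eta * grad + alpha * prev_delta
```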
,{"location":"user_guide/general_concepts/linear-gradient-derivative/","text":"Deriving the Gradient Descent Rule for Linear Regression and Adaline Linear Regression and Adaptive Linear Neurons (Adalines) are closely related to each other. In fact, the Adaline algorithm is identical to linear regression except for a threshold function \phi(\cdot)_T that converts the continuous output into a categorical class label \phi(z)_T = \begin{cases} 1 & if \; z \geq 0 \\ 0 & if \; z < 0 \end{cases}, where z is the net input, which is computed as the sum of the input features \mathbf{x} multiplied by the model weights \mathbf{w} : z = w_0x_0 + w_1x_1 + \dots + w_mx_m = \sum_{j=0}^{m} x_j w_j = \mathbf{w}^T \mathbf{x} (Note that x_0 refers to the bias unit so that x_0=1 .) In the case of linear regression and Adaline, the activation function \phi(\cdot)_A is simply the identity function so that \phi(z)_A = z . Now, in order to learn the optimal model weights \mathbf{w} , we need to define a cost function that we can optimize. Here, our cost function J({\cdot}) is the sum of squared errors (SSE), which we multiply by \frac{1}{2} to make the derivation easier: J({\mathbf{w}}) = \frac{1}{2} \sum_i \big(y^{(i)} - \phi(z)_{A}^{(i)}\big)^2, where y^{(i)} is the label or target label of the i th training point x^{(i)} . (Note that the SSE cost function is convex and therefore differentiable.)
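The pieces defined so far (net input, identity activation, threshold) translate directly into code. A minimal sketch, assuming X carries a leading column of ones for the bias unit x_0:

```python
import numpy as np

def net_input(X, w):
    # z = w^T x for every sample; X is assumed to include a
    # leading column of 1s so that w[0] acts as the bias weight w_0
    return X.dot(w)

def activation(z):
    # linear regression and Adaline use the identity activation
    return z

def threshold(z):
    # Adaline's unit step converts the continuous output into a
    # class label: 1 if z >= 0, else 0
    return np.where(z >= 0.0, 1, 0)
```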
In simple words, we can summarize the gradient descent learning as follows: Initialize the weights to 0 or small random numbers. For k epochs (passes over the training set) For each training sample x^{(i)} Compute the predicted output value \hat{y}^{(i)} Compare \hat{y}^{(i)} to the actual output y^{(i)} and compute the \"weight update\" value Accumulate the \"weight update\" value Update the weight coefficients by the accumulated \"weight update\" values Which we can translate into a more mathematical notation: Initialize the weights to 0 or small random numbers. For k epochs For each training sample x^{(i)} \phi(z^{(i)})_A = \hat{y}^{(i)} \Delta w_{(t+1), \; j} = \eta (y^{(i)} - \hat{y}^{(i)}) x_{j}^{(i)}\; (where \eta is the learning rate); \Delta w_{j} := \Delta w_j\; + \Delta w_{(t+1), \;j} \mathbf{w} := \mathbf{w} + \Delta \mathbf{w} Performing this global weight update \mathbf{w} := \mathbf{w} + \Delta \mathbf{w}, can be understood as \"updating the model weights by taking an opposite step towards the cost gradient scaled by the learning rate \eta \" \Delta \mathbf{w} = - \eta \nabla J(\mathbf{w}), where the partial derivative with respect to each w_j can be written as \frac{\partial J}{\partial w_j} = - \sum_i \big(y^{(i)} - \phi(z)_{A}^{(i)}\big) x_{j}^{(i)}. To summarize: in order to use gradient descent to learn the model coefficients, we simply update the weights \mathbf{w} by taking a step into the opposite direction of the gradient for each pass over the training set -- that's basically it. But how do we get to the equation \frac{\partial J}{\partial w_j} = - \sum_i \big(y^{(i)} - \phi(z)_{A}^{(i)}\big) x_{j}^{(i)}? Let's walk through the derivation step by step. \begin{aligned} & \frac{\partial J}{\partial w_j} \\ & = \frac{\partial}{\partial w_j} \frac{1}{2} \sum_i \big(y^{(i)} - \phi(z)_{A}^{(i)}\big)^2 \\ & = \frac{1}{2} \frac{\partial}{\partial w_j} \sum_i \big(y^{(i)} - \phi(z)_{A}^{(i)}\big)^2 \\ & = \frac{1}{2} \sum_i 2 \big(y^{(i)} - \phi(z)_{A}^{(i)}\big) \frac{\partial}{\partial w_j} \big(y^{(i)} - \phi(z)_{A}^{(i)}\big) \\ & = \sum_i \big(y^{(i)} - \phi(z)_{A}^{(i)}\big) \frac{\partial}{\partial w_j} \bigg(y^{(i)} - \sum_j \big(w_{j} x_{j}^{(i)} \big) \bigg) \\ & = \sum_i \big(y^{(i)} - \phi(z)_{A}^{(i)}\big)(-x_{j}^{(i)}) \\ & = - \sum_i \big(y^{(i)} - \phi(z)_{A}^{(i)}\big)x_{j}^{(i)} \end{aligned}","title":"Deriving the Gradient Descent Rule for Linear Regression and Adaline"}
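Putting the derived rule to work, here is a compact NumPy sketch of the accumulated update \Delta w_j = \eta \sum_i (y^{(i)} - \phi(z)^{(i)}) x_j^{(i)}; an illustration of the derivation above, not the mlxtend implementation:

```python
import numpy as np

def fit_gd(X, y, eta=0.01, n_epochs=20):
    """Batch gradient descent for linear regression / Adaline.
    X is assumed to include a leading column of 1s (bias unit)."""
    w = np.zeros(X.shape[1])
    for _ in range(n_epochs):
        output = X.dot(w)           # identity activation: phi(z) = z
        errors = y - output
        w += eta * X.T.dot(errors)  # accumulated weight update Delta w
    return w
```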
,{"location":"user_guide/general_concepts/regularization-linear/","text":"Regularization of Generalized Linear Models Overview We can understand regularization as an approach of adding an additional bias to a model to reduce the degree of overfitting in models that suffer from high variance. By adding regularization terms to the cost function, we penalize large model coefficients (weights); effectively, we are reducing the complexity of the model.
L2 regularization In L2 regularization, we shrink the weights by penalizing the squared Euclidean norm of the weight coefficients (the weight vector \\mathbf{w} ); \\lambda is the regularization parameter to be optimized. L2: \\lambda\\; \\lVert \\mathbf{w} \\rVert_2^2 = \\lambda \\sum_{j=1}^{m} w_j^2 For example, we can regularize the sum of squared errors cost function (SSE) as follows: SSE = \\sum^{n}_{i=1} \\big(\\text{target}^{(i)} - \\text{output}^{(i)}\\big)^2 + L2 Intuitively, we can think of regularization as an additional penalty term or constraint as shown in the figure below. Without regularization, our objective is to find the global cost minimum. By adding a regularization penalty, our objective becomes to minimize the cost function under the constraint that we have to stay within our \"budget\" (the gray-shaded ball). In addition, we can control the regularization strength via the regularization parameter \\lambda . The larger the value of \\lambda , the stronger the regularization of the model. The weight coefficients approach 0 when \\lambda goes towards infinity. L1 regularization In L1 regularization, we shrink the weights using the absolute values of the weight coefficients (the weight vector \\mathbf{w} ); \\lambda is the regularization parameter to be optimized. L1: \\lambda \\; \\lVert\\mathbf{w}\\rVert_1 = \\lambda \\sum_{j=1}^{m} |w_j| For example, we can regularize the sum of squared errors cost function (SSE) as follows: SSE = \\sum^{n}_{i=1} \\big(\\text{target}^{(i)} - \\text{output}^{(i)}\\big)^2 + L1 At its core, L1 regularization is very similar to L2 regularization. However, instead of a quadratic penalty term as in L2, we penalize the model by the absolute weight coefficients. As we can see in the figure below, our \"budget\" has \"sharp edges,\" which is the geometric interpretation of why the L1 model induces sparsity. References [1] M. Y. Park and T. Hastie. \"L1-regularization path algorithm for generalized linear models\" . Journal of the Royal Statistical Society: Series B (Statistical Methodology), 69(4):659\u2013677, 2007. [2] A. Y. Ng. \"Feature selection, L1 vs. L2 regularization, and rotational invariance\" . In Proceedings of the twenty-first international conference on Machine learning, page 78. ACM, 2004.","title":"Regularization of Generalized Linear Models"},{"location":"user_guide/general_concepts/regularization-linear/#regularization-of-generalized-linear-models","text":"","title":"Regularization of Generalized Linear Models"},{"location":"user_guide/general_concepts/regularization-linear/#overview","text":"We can understand regularization as an approach that adds bias to a model to reduce the degree of overfitting in models that suffer from high variance. By adding regularization terms to the cost function, we penalize large model coefficients (weights); effectively, we are reducing the complexity of the model.","title":"Overview"},{"location":"user_guide/general_concepts/regularization-linear/#l2-regularization","text":"In L2 regularization, we shrink the weights by penalizing the squared Euclidean norm of the weight coefficients (the weight vector \\mathbf{w} ); \\lambda is the regularization parameter to be optimized.
L2: \\lambda\\; \\lVert \\mathbf{w} \\rVert_2^2 = \\lambda \\sum_{j=1}^{m} w_j^2 For example, we can regularize the sum of squared errors cost function (SSE) as follows: SSE = \\sum^{n}_{i=1} \\big(\\text{target}^{(i)} - \\text{output}^{(i)}\\big)^2 + L2 Intuitively, we can think of regularization as an additional penalty term or constraint as shown in the figure below. Without regularization, our objective is to find the global cost minimum. By adding a regularization penalty, our objective becomes to minimize the cost function under the constraint that we have to stay within our \"budget\" (the gray-shaded ball). In addition, we can control the regularization strength via the regularization parameter \\lambda . The larger the value of \\lambda , the stronger the regularization of the model. The weight coefficients approach 0 when \\lambda goes towards infinity.","title":"L2 regularization"},{"location":"user_guide/general_concepts/regularization-linear/#l1-regularization","text":"In L1 regularization, we shrink the weights using the absolute values of the weight coefficients (the weight vector \\mathbf{w} ); \\lambda is the regularization parameter to be optimized. L1: \\lambda \\; \\lVert\\mathbf{w}\\rVert_1 = \\lambda \\sum_{j=1}^{m} |w_j| For example, we can regularize the sum of squared errors cost function (SSE) as follows: SSE = \\sum^{n}_{i=1} \\big(\\text{target}^{(i)} - \\text{output}^{(i)}\\big)^2 + L1 At its core, L1 regularization is very similar to L2 regularization. However, instead of a quadratic penalty term as in L2, we penalize the model by the absolute weight coefficients. As we can see in the figure below, our \"budget\" has \"sharp edges,\" which is the geometric interpretation of why the L1 model induces sparsity.","title":"L1 regularization"},{"location":"user_guide/general_concepts/regularization-linear/#references","text":"[1] M. Y. Park and T. Hastie. \"L1-regularization path algorithm for generalized linear models\" . Journal of the Royal Statistical Society: Series B (Statistical Methodology), 69(4):659\u2013677, 2007. [2] A. Y. Ng. \"Feature selection, L1 vs. L2 regularization, and rotational invariance\" . In Proceedings of the twenty-first international conference on Machine learning, page 78. ACM, 2004.","title":"References"},{"location":"user_guide/image/extract_face_landmarks/","text":"Extract Face Landmarks A function to extract facial landmarks. from mlxtend.image import extract_face_landmarks Overview The extract_face_landmarks function detects the faces in a given image and then returns the face landmark points (also known as the face shape) for the first face found in the image, based on dlib's face landmark detection code (http://dlib.net/face_landmark_detection_ex.cpp.html): The face detector we use is made using the classic Histogram of Oriented Gradients (HOG) feature combined with a linear classifier, an image pyramid, and sliding window detection scheme. The pose estimator was created by using dlib's implementation of the paper: One Millisecond Face Alignment with an Ensemble of Regression Trees by Vahid Kazemi and Josephine Sullivan, CVPR 2014 and was trained on the iBUG 300-W face landmark dataset (see https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/): C. Sagonas, E. Antonakos, G. Tzimiropoulos, S. Zafeiriou, M. Pantic. 300 faces In-the-wild challenge: Database and results. Image and Vision Computing (IMAVIS), Special Issue on Facial Landmark Localisation \"In-The-Wild\". 2016.
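Conceptually, extract_face_landmarks wraps dlib's frontal face detector and its 68-point shape predictor. The following is a rough sketch of the underlying steps, not mlxtend's exact implementation, assuming dlib is installed and the model file linked below has been downloaded and unpacked:

import dlib
import numpy as np

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

def landmarks_sketch(img):
    faces = detector(img, 1)           # upsample the image once to find smaller faces
    shape = predictor(img, faces[0])   # 68-point shape for the first detected face
    return np.array([[p.x, p.y] for p in shape.parts()], dtype=np.int32)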
You can get the trained model file from: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2. Note that the license for the iBUG 300-W dataset excludes commercial use. So you should contact Imperial College London to find out if it's OK for you to use this model file in a commercial product. References Kazemi, Vahid, and Josephine Sullivan. \"One millisecond face alignment with an ensemble of regression trees.\" Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2014. Example 1 import imageio import matplotlib.pyplot as plt from mlxtend.image import extract_face_landmarks img = imageio.imread('lena.png') landmarks = extract_face_landmarks(img) print(landmarks.shape) print('\\n\\nFirst 10 landmarks:\\n', landmarks[:10]) (68, 2) First 10 landmarks: [[206 266] [204 290] [205 314] [209 337] [220 357] [236 374] [253 387] [273 397] [290 398] [304 391]] Visualization of the landmarks: fig = plt.figure(figsize=(15, 5)) ax = fig.add_subplot(1, 3, 1) ax.imshow(img) ax = fig.add_subplot(1, 3, 2) ax.scatter(landmarks[:, 0], -landmarks[:, 1], alpha=0.8) ax = fig.add_subplot(1, 3, 3) img2 = img.copy() for p in landmarks: img2[p[1]-3:p[1]+3,p[0]-3:p[0]+3,:] = (255, 255, 255) ax.imshow(img2) plt.show() API extract_face_landmarks(img, return_dtype=np.int32) Function to extract face landmarks. Note that this function requires an installation of the Python version of the library \"dlib\": http://dlib.net Parameters img : array, shape = [h, w, ?] numpy array of a face image. Supported shapes are - 3D tensors with 1 or more color channels, for example, RGB: [h, w, 3] - 2D tensors without color channel, for example, Grayscale: [h, w] return_dtype: the return data-type of the array, default: np.int32. Returns landmarks : numpy.ndarray, shape = [68, 2] A numpy array, where each row contains the x-y coordinates of one landmark point. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/sources/image/extract_face_landmarks.ipynb","title":"Extract Face Landmarks"},{"location":"user_guide/image/extract_face_landmarks/#extract-face-landmarks","text":"A function to extract facial landmarks. from mlxtend.image import extract_face_landmarks","title":"Extract Face Landmarks"},{"location":"user_guide/image/extract_face_landmarks/#overview","text":"The extract_face_landmarks function detects the faces in a given image and then returns the face landmark points (also known as the face shape) for the first face found in the image, based on dlib's face landmark detection code (http://dlib.net/face_landmark_detection_ex.cpp.html): The face detector we use is made using the classic Histogram of Oriented Gradients (HOG) feature combined with a linear classifier, an image pyramid, and sliding window detection scheme. The pose estimator was created by using dlib's implementation of the paper: One Millisecond Face Alignment with an Ensemble of Regression Trees by Vahid Kazemi and Josephine Sullivan, CVPR 2014 and was trained on the iBUG 300-W face landmark dataset (see https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/): C. Sagonas, E. Antonakos, G. Tzimiropoulos, S. Zafeiriou, M. Pantic. 300 faces In-the-wild challenge: Database and results. Image and Vision Computing (IMAVIS), Special Issue on Facial Landmark Localisation \"In-The-Wild\". 2016. You can get the trained model file from: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2. Note that the license for the iBUG 300-W dataset excludes commercial use.
So you should contact Imperial College London to find out if it's OK for you to use this model file in a commercial product.","title":"Overview"},{"location":"user_guide/image/extract_face_landmarks/#references","text":"Kazemi, Vahid, and Josephine Sullivan. \"One millisecond face alignment with an ensemble of regression trees.\" Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2014.","title":"References"},{"location":"user_guide/image/extract_face_landmarks/#example-1","text":"import imageio import matplotlib.pyplot as plt from mlxtend.image import extract_face_landmarks img = imageio.imread('lena.png') landmarks = extract_face_landmarks(img) print(landmarks.shape) print('\\n\\nFirst 10 landmarks:\\n', landmarks[:10]) (68, 2) First 10 landmarks: [[206 266] [204 290] [205 314] [209 337] [220 357] [236 374] [253 387] [273 397] [290 398] [304 391]] Visualization of the landmarks: fig = plt.figure(figsize=(15, 5)) ax = fig.add_subplot(1, 3, 1) ax.imshow(img) ax = fig.add_subplot(1, 3, 2) ax.scatter(landmarks[:, 0], -landmarks[:, 1], alpha=0.8) ax = fig.add_subplot(1, 3, 3) img2 = img.copy() for p in landmarks: img2[p[1]-3:p[1]+3,p[0]-3:p[0]+3,:] = (255, 255, 255) ax.imshow(img2) plt.show()","title":"Example 1"},{"location":"user_guide/image/extract_face_landmarks/#api","text":"extract_face_landmarks(img, return_dtype=np.int32) Function to extract face landmarks. Note that this function requires an installation of the Python version of the library \"dlib\": http://dlib.net Parameters img : array, shape = [h, w, ?] numpy array of a face image. Supported shapes are - 3D tensors with 1 or more color channels, for example, RGB: [h, w, 3] - 2D tensors without color channel, for example, Grayscale: [h, w] return_dtype: the return data-type of the array, default: np.int32. Returns landmarks : numpy.ndarray, shape = [68, 2] A numpy array, where each row contains the x-y coordinates of one landmark point. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/sources/image/extract_face_landmarks.ipynb","title":"API"},{"location":"user_guide/math/num_combinations/","text":"Compute the Number of Combinations A function to calculate the number of combinations for creating subsequences of k elements out of a sequence with n elements. from mlxtend.math import num_combinations Overview Combinations are selections of items from a collection regardless of the order in which they appear (in contrast to permutations). For example, let's consider a combination of 3 elements (k=3) from a collection of 5 elements (n=5): collection: {1, 2, 3, 4, 5} combination 1a: {1, 3, 5} combination 1b: {1, 5, 3} combination 1c: {3, 5, 1} ... combination 2: {1, 3, 4} In the example above, the combinations 1a, 1b, and 1c are the \"same combination\" and counted as \"1 possible way to combine items 1, 3, and 5\" -- in combinations, the order does not matter.
The number of ways to combine elements ( without replacement ) from a collection with size n into subsets of size k is computed via the binomial coefficient (\" n choose k \"): \\begin{pmatrix} n \\\\ k \\end{pmatrix} = \\frac{n(n-1)\\ldots(n-k+1)}{k(k-1)\\dots1} = \\frac{n!}{k!(n-k)!} To compute the number of combinations with replacement , the following alternative equation is used (\" n multichoose k \"): \\begin{pmatrix} n \\\\ k \\end{pmatrix} = \\begin{pmatrix} n + k -1 \\\\ k \\end{pmatrix} References https://en.wikipedia.org/wiki/Combination Example 1 - Compute the number of combinations from mlxtend.math import num_combinations c = num_combinations(n=20, k=8, with_replacement=False) print('Number of ways to combine 20 elements' ' into 8 subelements: %d' % c) Number of ways to combine 20 elements into 8 subelements: 125970 from mlxtend.math import num_combinations c = num_combinations(n=20, k=8, with_replacement=True) print('Number of ways to combine 20 elements' ' into 8 subelements (with replacement): %d' % c) Number of ways to combine 20 elements into 8 subelements (with replacement): 2220075 Example 2 - A progress tracking use-case It is often quite useful to track the progress of a computationally expensive task to estimate its runtime. Here, the num_combinations function can be used to compute the maximum number of loops of a combinations iterable from itertools: import itertools import sys import time from mlxtend.math import num_combinations items = {1, 2, 3, 4, 5, 6, 7, 8} max_iter = num_combinations(n=len(items), k=3, with_replacement=False) for idx, i in enumerate(itertools.combinations(items, r=3)): # do some computation with itemset i time.sleep(0.1) sys.stdout.write('\\rProgress: %d/%d' % (idx + 1, max_iter)) sys.stdout.flush() Progress: 56/56 API num_combinations(n, k, with_replacement=False) Function to calculate the number of possible combinations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool (default: False) Allows repeated elements if True. Returns comb : int Number of possible combinations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_combinations/","title":"Compute the Number of Combinations"},{"location":"user_guide/math/num_combinations/#compute-the-number-of-combinations","text":"A function to calculate the number of combinations for creating subsequences of k elements out of a sequence with n elements. from mlxtend.math import num_combinations","title":"Compute the Number of Combinations"},{"location":"user_guide/math/num_combinations/#overview","text":"Combinations are selections of items from a collection regardless of the order in which they appear (in contrast to permutations). For example, let's consider a combination of 3 elements (k=3) from a collection of 5 elements (n=5): collection: {1, 2, 3, 4, 5} combination 1a: {1, 3, 5} combination 1b: {1, 5, 3} combination 1c: {3, 5, 1} ... combination 2: {1, 3, 4} In the example above, the combinations 1a, 1b, and 1c are the \"same combination\" and counted as \"1 possible way to combine items 1, 3, and 5\" -- in combinations, the order does not matter.
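Both counting identities above can be cross-checked against Python's standard library; a minimal sketch (math.comb requires Python >= 3.8, and the expected values are taken from the example outputs above):

import math
from mlxtend.math import num_combinations

n, k = 20, 8
# 'n choose k' == n! / (k! * (n - k)!)
assert num_combinations(n=n, k=k) == math.comb(n, k) == 125970
# 'n multichoose k' == C(n + k - 1, k)
assert num_combinations(n=n, k=k, with_replacement=True) == math.comb(n + k - 1, k) == 2220075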
The number of ways to combine elements ( without replacement ) from a collection with size n into subsets of size k is computed via the binomial coefficient (\" n choose k \"): \\begin{pmatrix} n \\\\ k \\end{pmatrix} = \\frac{n(n-1)\\ldots(n-k+1)}{k(k-1)\\dots1} = \\frac{n!}{k!(n-k)!} To compute the number of combinations with replacement , the following alternative equation is used (\" n multichoose k \"): \\begin{pmatrix} n \\\\ k \\end{pmatrix} = \\begin{pmatrix} n + k -1 \\\\ k \\end{pmatrix}","title":"Overview"},{"location":"user_guide/math/num_combinations/#references","text":"https://en.wikipedia.org/wiki/Combination","title":"References"},{"location":"user_guide/math/num_combinations/#example-1-compute-the-number-of-combinations","text":"from mlxtend.math import num_combinations c = num_combinations(n=20, k=8, with_replacement=False) print('Number of ways to combine 20 elements' ' into 8 subelements: %d' % c) Number of ways to combine 20 elements into 8 subelements: 125970 from mlxtend.math import num_combinations c = num_combinations(n=20, k=8, with_replacement=True) print('Number of ways to combine 20 elements' ' into 8 subelements (with replacement): %d' % c) Number of ways to combine 20 elements into 8 subelements (with replacement): 2220075","title":"Example 1 - Compute the number of combinations"},{"location":"user_guide/math/num_combinations/#example-2-a-progress-tracking-use-case","text":"It is often quite useful to track the progress of a computationally expensive task to estimate its runtime. Here, the num_combinations function can be used to compute the maximum number of loops of a combinations iterable from itertools: import itertools import sys import time from mlxtend.math import num_combinations items = {1, 2, 3, 4, 5, 6, 7, 8} max_iter = num_combinations(n=len(items), k=3, with_replacement=False) for idx, i in enumerate(itertools.combinations(items, r=3)): # do some computation with itemset i time.sleep(0.1) sys.stdout.write('\\rProgress: %d/%d' % (idx + 1, max_iter)) sys.stdout.flush() Progress: 56/56","title":"Example 2 - A progress tracking use-case"},{"location":"user_guide/math/num_combinations/#api","text":"num_combinations(n, k, with_replacement=False) Function to calculate the number of possible combinations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool (default: False) Allows repeated elements if True. Returns comb : int Number of possible combinations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_combinations/","title":"API"},{"location":"user_guide/math/num_permutations/","text":"Compute the Number of Permutations A function to calculate the number of permutations for creating subsequences of k elements out of a sequence with n elements. from mlxtend.math import num_permutations Overview Permutations are selections of items from a collection with regard to the order in which they appear (in contrast to combinations). For example, let's consider a permutation of 3 elements (k=3) from a collection of 5 elements (n=5): collection: {1, 2, 3, 4, 5} permutation 1a: {1, 3, 5} permutation 1b: {1, 5, 3} permutation 1c: {3, 5, 1} ... permutation 2: {1, 3, 4} In the example above, the permutations 1a, 1b, and 1c are the \"same combination\" but distinct permutations -- in combinations, the order does not matter, but in permutations it does. The number of ways to permute k elements ( without replacement ) out of a collection with size n is obtained by multiplying the binomial coefficient (\" n choose k \") by k! : k!\\begin{pmatrix} n \\\\ k \\end{pmatrix} = k!
The number of ways to combine elements ( without replacement ) from a collection with size n into subsets of size k is computed via the binomial coefficient (\" n choose k \"): k!\\begin{pmatrix} n \\\\ k \\end{pmatrix} = k! \\cdot \\frac{n!}{k!(n-k)!} = \\frac{n!}{(n-k)!} To compute the number of permutations with replacement , we simply need to compute n^k . References https://en.wikipedia.org/wiki/Permutation Example 1 - Compute the number of permutations from mlxtend.math import num_permutations c = num_permutations(n=20, k=8, with_replacement=False) print('Number of ways to permute 20 elements' ' into 8 subelements: %d' % c) Number of ways to permute 20 elements into 8 subelements: 5079110400 from mlxtend.math import num_permutations c = num_permutations(n=20, k=8, with_replacement=True) print('Number of ways to combine 20 elements' ' into 8 subelements (with replacement): %d' % c) Number of ways to combine 20 elements into 8 subelements (with replacement): 25600000000 Example 2 - A progress tracking use-case It is often quite useful to track the progress of a computational expensive tasks to estimate its runtime. Here, the num_combination function can be used to compute the maximum number of loops of a permutations iterable from itertools: import itertools import sys import time from mlxtend.math import num_permutations items = {1, 2, 3, 4, 5, 6, 7, 8} max_iter = num_permutations(n=len(items), k=3, with_replacement=False) for idx, i in enumerate(itertools.permutations(items, r=3)): # do some computation with itemset i time.sleep(0.01) sys.stdout.write('\\rProgress: %d/%d' % (idx + 1, max_iter)) sys.stdout.flush() Progress: 336/336 API num_permutations(n, k, with_replacement=False) Function to calculate the number of possible permutations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool Allows repeated elements if True. Returns permut : int Number of possible permutations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_permutations/","title":"Compute the Number of Permutations"},{"location":"user_guide/math/num_permutations/#compute-the-number-of-permutations","text":"A function to calculate the number of permutations for creating subsequences of k elements out of a sequence with n elements. from mlxtend.math import num_permutations","title":"Compute the Number of Permutations"},{"location":"user_guide/math/num_permutations/#overview","text":"Permutations are selections of items from a collection with regard to the order in which they appear (in contrast to combinations). For example, let's consider a permutation of 3 elements (k=3) from a collection of 5 elements (n=5): collection: {1, 2, 3, 4, 5} combination 1a: {1, 3, 5} combination 1b: {1, 5, 3} combination 1c: {3, 5, 1} ... combination 2: {1, 3, 4} In the example above the permutations 1a, 1b, and 1c, are the \"same combination\" but distinct permutations -- in combinations, the order does not matter, but in permutation it does matter. The number of ways to combine elements ( without replacement ) from a collection with size n into subsets of size k is computed via the binomial coefficient (\" n choose k \"): k!\\begin{pmatrix} n \\\\ k \\end{pmatrix} = k! 
\\cdot \\frac{n!}{k!(n-k)!} = \\frac{n!}{(n-k)!} To compute the number of permutations with replacement , we simply need to compute n^k .","title":"Overview"},{"location":"user_guide/math/num_permutations/#references","text":"https://en.wikipedia.org/wiki/Permutation","title":"References"},{"location":"user_guide/math/num_permutations/#example-1-compute-the-number-of-permutations","text":"from mlxtend.math import num_permutations c = num_permutations(n=20, k=8, with_replacement=False) print('Number of ways to permute 20 elements' ' into 8 subelements: %d' % c) Number of ways to permute 20 elements into 8 subelements: 5079110400 from mlxtend.math import num_permutations c = num_permutations(n=20, k=8, with_replacement=True) print('Number of ways to permute 20 elements' ' into 8 subelements (with replacement): %d' % c) Number of ways to permute 20 elements into 8 subelements (with replacement): 25600000000","title":"Example 1 - Compute the number of permutations"},{"location":"user_guide/math/num_permutations/#example-2-a-progress-tracking-use-case","text":"It is often quite useful to track the progress of a computationally expensive task to estimate its runtime. Here, the num_permutations function can be used to compute the maximum number of loops of a permutations iterable from itertools: import itertools import sys import time from mlxtend.math import num_permutations items = {1, 2, 3, 4, 5, 6, 7, 8} max_iter = num_permutations(n=len(items), k=3, with_replacement=False) for idx, i in enumerate(itertools.permutations(items, r=3)): # do some computation with itemset i time.sleep(0.01) sys.stdout.write('\\rProgress: %d/%d' % (idx + 1, max_iter)) sys.stdout.flush() Progress: 336/336","title":"Example 2 - A progress tracking use-case"},{"location":"user_guide/math/num_permutations/#api","text":"num_permutations(n, k, with_replacement=False) Function to calculate the number of possible permutations. Parameters n : int Total number of items. k : int Number of elements of the target itemset. with_replacement : bool Allows repeated elements if True. Returns permut : int Number of possible permutations. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/math/num_permutations/","title":"API"},{"location":"user_guide/math/vectorspace_dimensionality/","text":"Vectorspace Dimensionality A function to compute the number of dimensions a set of vectors (arranged as columns in a matrix) spans. from mlxtend.math import vectorspace_dimensionality Overview Given a set of vectors, arranged as columns in a matrix, the vectorspace_dimensionality computes the number of dimensions (i.e., hyper-volume) that the vectorspace spans using the Gram-Schmidt process [1]. In particular, since the Gram-Schmidt process yields vectors that are zero or normalized to 1 (i.e., an orthonormal vectorset if the input was a set of linearly independent vectors), the sum of the vector norms corresponds to the number of dimensions of a vectorset. References [1] https://en.wikipedia.org/wiki/Gram\u2013Schmidt_process Example 1 - Compute the dimensions of a vectorspace Let's assume we have the two basis vectors x=[1 \\;\\;\\; 0]^T and y=[0\\;\\;\\; 1]^T as columns in a matrix.
Due to the linear independence of the two vectors, the space that they span is naturally a plane (2D space): import numpy as np from mlxtend.math import vectorspace_dimensionality a = np.array([[1, 0], [0, 1]]) vectorspace_dimensionality(a) 2 However, if one vector is a linear combination of the other, it's intuitive to see that the space the vectorset describes is merely a line, aka a 1D space: b = np.array([[1, 2], [0, 0]]) vectorspace_dimensionality(b) 1 If 3 vectors are all linearly independent of each other, the dimensionality of the vector space is a volume (i.e., a 3D space): d = np.array([[1, 9, 1], [3, 2, 2], [5, 4, 3]]) vectorspace_dimensionality(d) 3 Again, if a pair of vectors is linearly dependent (here: the 1st and the 2nd column), this reduces the dimensionality by 1: c = np.array([[1, 2, 1], [3, 6, 2], [5, 10, 3]]) vectorspace_dimensionality(c) 2 API vectorspace_dimensionality(ary) Computes the hyper-volume spanned by a vector set Parameters ary : array-like, shape=[num_vectors, num_vectors] A set of vectors (arranged as columns in a matrix) Returns dimensions : int An integer indicating the dimensionality (hyper-volume) spanned by the vector set","title":"Vectorspace Dimensionality"},{"location":"user_guide/math/vectorspace_dimensionality/#vectorspace-dimensionality","text":"A function to compute the number of dimensions a set of vectors (arranged as columns in a matrix) spans. from mlxtend.math import vectorspace_dimensionality","title":"Vectorspace Dimensionality"},{"location":"user_guide/math/vectorspace_dimensionality/#overview","text":"Given a set of vectors, arranged as columns in a matrix, the vectorspace_dimensionality computes the number of dimensions (i.e., hyper-volume) that the vectorspace spans using the Gram-Schmidt process [1]. In particular, since the Gram-Schmidt process yields vectors that are zero or normalized to 1 (i.e., an orthonormal vectorset if the input was a set of linearly independent vectors), the sum of the vector norms corresponds to the number of dimensions of a vectorset.","title":"Overview"},{"location":"user_guide/math/vectorspace_dimensionality/#references","text":"[1] https://en.wikipedia.org/wiki/Gram\u2013Schmidt_process","title":"References"},{"location":"user_guide/math/vectorspace_dimensionality/#example-1-compute-the-dimensions-of-a-vectorspace","text":"Let's assume we have the two basis vectors x=[1 \\;\\;\\; 0]^T and y=[0\\;\\;\\; 1]^T as columns in a matrix.
Due to the linear independence of the two vectors, the space that they span is naturally a plane (2D space): import numpy as np from mlxtend.math import vectorspace_dimensionality a = np.array([[1, 0], [0, 1]]) vectorspace_dimensionality(a) 2 However, if one vector is a linear combination of the other, it's intuitive to see that the space the vectorset describes is merely a line, aka a 1D space: b = np.array([[1, 2], [0, 0]]) vectorspace_dimensionality(b) 1 If 3 vectors are all linearly independent of each other, the dimensionality of the vector space is a volume (i.e., a 3D space): d = np.array([[1, 9, 1], [3, 2, 2], [5, 4, 3]]) vectorspace_dimensionality(d) 3 Again, if a pair of vectors is linearly dependent (here: the 1st and the 2nd column), this reduces the dimensionality by 1: c = np.array([[1, 2, 1], [3, 6, 2], [5, 10, 3]]) vectorspace_dimensionality(c) 2","title":"Example 1 - Compute the dimensions of a vectorspace"},{"location":"user_guide/math/vectorspace_dimensionality/#api","text":"vectorspace_dimensionality(ary) Computes the hyper-volume spanned by a vector set Parameters ary : array-like, shape=[num_vectors, num_vectors] A set of vectors (arranged as columns in a matrix) Returns dimensions : int An integer indicating the dimensionality (hyper-volume) spanned by the vector set","title":"API"},{"location":"user_guide/math/vectorspace_orthonormalization/","text":"Vectorspace Orthonormalization A function that converts a set of linearly independent vectors to a set of orthonormal basis vectors. from mlxtend.math import vectorspace_orthonormalization Overview The vectorspace_orthonormalization converts a set of linearly independent vectors to a set of orthonormal basis vectors using the Gram-Schmidt process [1]. References [1] https://en.wikipedia.org/wiki/Gram\u2013Schmidt_process Example 1 - Convert a set of vectors to an orthonormal basis Note that to convert a set of linearly independent vectors into a set of orthonormal basis vectors, the vectorspace_orthonormalization function expects the vectors to be arranged as columns of a matrix (here: NumPy array). Please keep in mind that the vectorspace_orthonormalization function also works for non-linearly independent vector sets; however, the resulting vectorset won't be orthonormal. An easy way to check whether all vectors in the input set are linearly independent is to use the numpy.linalg.det (determinant) function. import numpy as np from mlxtend.math import vectorspace_orthonormalization a = np.array([[2, 0, 4, 12], [0, 2, 16, 4], [4, 16, 6, 2], [2, -12, 4, 6]]) s = '' if np.linalg.det(a) == 0.0: s = ' not' print('Input vectors are%s linearly independent' % s) vectorspace_orthonormalization(a) Input vectors are linearly independent array([[ 0.40824829, -0.1814885 , 0.04982278, 0.89325973], [ 0. , 0.1088931 , 0.99349591, -0.03328918], [ 0.81649658, 0.50816781, -0.06462163, -0.26631346], [ 0.40824829, -0.83484711, 0.07942048, -0.36063281]]) Note that scaling the inputs equally by a factor should leave the results unchanged: vectorspace_orthonormalization(a/2) array([[ 0.40824829, -0.1814885 , 0.04982278, 0.89325973], [ 0. , 0.1088931 , 0.99349591, -0.03328918], [ 0.81649658, 0.50816781, -0.06462163, -0.26631346], [ 0.40824829, -0.83484711, 0.07942048, -0.36063281]]) However, in case of linear dependence (the second column is a linear combination of the first column in the example below), the vector elements of one of the dependent vectors will become zero.
(For a pair of linearly dependent vectors, the one with the larger column index will be the one that's zeroed.) a[:, 1] = a[:, 0] * 2 vectorspace_orthonormalization(a) array([[ 0.40824829, 0. , 0.04155858, 0.82364839], [ 0. , 0. , 0.99740596, -0.06501108], [ 0.81649658, 0. , -0.04155858, -0.52008861], [ 0.40824829, 0. , 0.04155858, 0.21652883]]) API vectorspace_orthonormalization(ary, eps=1e-13) Transforms a set of column vectors to an orthonormal basis. Given a set of linearly independent vectors, this function converts such column vectors, arranged in a matrix, into orthonormal basis vectors. Parameters ary : array-like, shape=[num_vectors, num_vectors] A set of vectors (arranged as columns in a matrix) eps : float (default: 1e-13) A small tolerance value to determine whether the vector norm is zero or not. Returns arr : array-like, shape=[num_vectors, num_vectors] An orthonormal set of vectors (arranged as columns)","title":"Vectorspace Orthonormalization"},{"location":"user_guide/math/vectorspace_orthonormalization/#vectorspace-orthonormalization","text":"A function that converts a set of linearly independent vectors to a set of orthonormal basis vectors. from mlxtend.math import vectorspace_orthonormalization","title":"Vectorspace Orthonormalization"},{"location":"user_guide/math/vectorspace_orthonormalization/#overview","text":"The vectorspace_orthonormalization converts a set of linearly independent vectors to a set of orthonormal basis vectors using the Gram-Schmidt process [1].","title":"Overview"},{"location":"user_guide/math/vectorspace_orthonormalization/#references","text":"[1] https://en.wikipedia.org/wiki/Gram\u2013Schmidt_process","title":"References"},{"location":"user_guide/math/vectorspace_orthonormalization/#example-1-convert-a-set-of-vector-to-an-orthonormal-basis","text":"Note that to convert a set of linearly independent vectors into a set of orthonormal basis vectors, the vectorspace_orthonormalization function expects the vectors to be arranged as columns of a matrix (here: NumPy array). Please keep in mind that the vectorspace_orthonormalization function also works for non-linearly independent vector sets; however, the resulting vectorset won't be orthonormal. An easy way to check whether all vectors in the input set are linearly independent is to use the numpy.linalg.det (determinant) function. import numpy as np from mlxtend.math import vectorspace_orthonormalization a = np.array([[2, 0, 4, 12], [0, 2, 16, 4], [4, 16, 6, 2], [2, -12, 4, 6]]) s = '' if np.linalg.det(a) == 0.0: s = ' not' print('Input vectors are%s linearly independent' % s) vectorspace_orthonormalization(a) Input vectors are linearly independent array([[ 0.40824829, -0.1814885 , 0.04982278, 0.89325973], [ 0. , 0.1088931 , 0.99349591, -0.03328918], [ 0.81649658, 0.50816781, -0.06462163, -0.26631346], [ 0.40824829, -0.83484711, 0.07942048, -0.36063281]]) Note that scaling the inputs equally by a factor should leave the results unchanged: vectorspace_orthonormalization(a/2) array([[ 0.40824829, -0.1814885 , 0.04982278, 0.89325973], [ 0. , 0.1088931 , 0.99349591, -0.03328918], [ 0.81649658, 0.50816781, -0.06462163, -0.26631346], [ 0.40824829, -0.83484711, 0.07942048, -0.36063281]]) However, in case of linear dependence (the second column is a linear combination of the first column in the example below), the vector elements of one of the dependent vectors will become zero. (For a pair of linearly dependent vectors, the one with the larger column index will be the one that's zeroed.)
a[:, 1] = a[:, 0] * 2 vectorspace_orthonormalization(a) array([[ 0.40824829, 0. , 0.04155858, 0.82364839], [ 0. , 0. , 0.99740596, -0.06501108], [ 0.81649658, 0. , -0.04155858, -0.52008861], [ 0.40824829, 0. , 0.04155858, 0.21652883]])","title":"Example 1 - Convert a set of vectors to an orthonormal basis"},{"location":"user_guide/math/vectorspace_orthonormalization/#api","text":"vectorspace_orthonormalization(ary, eps=1e-13) Transforms a set of column vectors to an orthonormal basis. Given a set of linearly independent vectors, this function converts such column vectors, arranged in a matrix, into orthonormal basis vectors. Parameters ary : array-like, shape=[num_vectors, num_vectors] A set of vectors (arranged as columns in a matrix) eps : float (default: 1e-13) A small tolerance value to determine whether the vector norm is zero or not. Returns arr : array-like, shape=[num_vectors, num_vectors] An orthonormal set of vectors (arranged as columns)","title":"API"},{"location":"user_guide/plotting/category_scatter/","text":"Scatterplot with Categories A function to quickly produce a scatter plot colored by categories from a pandas DataFrame or NumPy ndarray object. from mlxtend.plotting import category_scatter Overview References - Example 1 - Category Scatter from Pandas DataFrames import pandas as pd from io import StringIO csvfile = \"\"\"label,x,y class1,10.0,8.04 class1,10.5,7.30 class2,8.3,5.5 class2,8.1,5.9 class3,3.5,3.5 class3,3.8,5.1\"\"\" df = pd.read_csv(StringIO(csvfile)) df label x y 0 class1 10.0 8.04 1 class1 10.5 7.30 2 class2 8.3 5.50 3 class2 8.1 5.90 4 class3 3.5 3.50 5 class3 3.8 5.10 Plotting the data where the categories are determined by the unique values in the label column label_col . The x and y values are simply the column names of the DataFrame that we want to plot. import matplotlib.pyplot as plt from mlxtend.plotting import category_scatter fig = category_scatter(x='x', y='y', label_col='label', data=df, legend_loc='upper left') Example 2 - Category Scatter from NumPy Arrays import numpy as np from io import BytesIO csvfile = \"\"\"1,10.0,8.04 1,10.5,7.30 2,8.3,5.5 2,8.1,5.9 3,3.5,3.5 3,3.8,5.1\"\"\" ary = np.genfromtxt(BytesIO(csvfile.encode()), delimiter=',') ary array([[ 1. , 10. , 8.04], [ 1. , 10.5 , 7.3 ], [ 2. , 8.3 , 5.5 ], [ 2. , 8.1 , 5.9 ], [ 3. , 3.5 , 3.5 ], [ 3. , 3.8 , 5.1 ]]) Now, pretend that the first column represents the labels and that the second and third column represent the x and y values, respectively. import matplotlib.pyplot as plt from mlxtend.plotting import category_scatter fig = category_scatter(x=1, y=2, label_col=0, data=ary, legend_loc='upper left') API category_scatter(x, y, label_col, data, markers='sxo^v', colors=('blue', 'green', 'red', 'purple', 'gray', 'cyan'), alpha=0.7, markersize=20.0, legend_loc='best') Scatter plot to plot categories in different colors/markerstyles. Parameters x : str or int DataFrame column name of the x-axis values or integer for the numpy ndarray column index. y : str or int DataFrame column name of the y-axis values or integer for the numpy ndarray column index data : Pandas DataFrame object or NumPy ndarray. markers : str Markers that are cycled through the label category. colors : tuple Colors that are cycled through the label category. alpha : float (default: 0.7) Parameter to control the transparency. markersize : float (default: 20.0) Parameter to control the marker size.
legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/category_scatter/","title":"Scatterplot with Categories"},{"location":"user_guide/plotting/category_scatter/#scatterplot-with-categories","text":"A function to quickly produce a scatter plot colored by categories from a pandas DataFrame or NumPy ndarray object. from mlxtend.plotting import category_scatter","title":"Scatterplot with Categories"},{"location":"user_guide/plotting/category_scatter/#overview","text":"","title":"Overview"},{"location":"user_guide/plotting/category_scatter/#references","text":"-","title":"References"},{"location":"user_guide/plotting/category_scatter/#example-1-category-scatter-from-pandas-dataframes","text":"import pandas as pd from io import StringIO csvfile = \"\"\"label,x,y class1,10.0,8.04 class1,10.5,7.30 class2,8.3,5.5 class2,8.1,5.9 class3,3.5,3.5 class3,3.8,5.1\"\"\" df = pd.read_csv(StringIO(csvfile)) df label x y 0 class1 10.0 8.04 1 class1 10.5 7.30 2 class2 8.3 5.50 3 class2 8.1 5.90 4 class3 3.5 3.50 5 class3 3.8 5.10 Plotting the data where the categories are determined by the unique values in the label column label_col . The x and y values are simply the column names of the DataFrame that we want to plot. import matplotlib.pyplot as plt from mlxtend.plotting import category_scatter fig = category_scatter(x='x', y='y', label_col='label', data=df, legend_loc='upper left')","title":"Example 1 - Category Scatter from Pandas DataFrames"},{"location":"user_guide/plotting/category_scatter/#example-2-category-scatter-from-numpy-arrays","text":"import numpy as np from io import BytesIO csvfile = \"\"\"1,10.0,8.04 1,10.5,7.30 2,8.3,5.5 2,8.1,5.9 3,3.5,3.5 3,3.8,5.1\"\"\" ary = np.genfromtxt(BytesIO(csvfile.encode()), delimiter=',') ary array([[ 1. , 10. , 8.04], [ 1. , 10.5 , 7.3 ], [ 2. , 8.3 , 5.5 ], [ 2. , 8.1 , 5.9 ], [ 3. , 3.5 , 3.5 ], [ 3. , 3.8 , 5.1 ]]) Now, pretend that the first column represents the labels and that the second and third column represent the x and y values, respectively. import matplotlib.pyplot as plt from mlxtend.plotting import category_scatter fig = category_scatter(x=1, y=2, label_col=0, data=ary, legend_loc='upper left')","title":"Example 2 - Category Scatter from NumPy Arrays"},{"location":"user_guide/plotting/category_scatter/#api","text":"category_scatter(x, y, label_col, data, markers='sxo^v', colors=('blue', 'green', 'red', 'purple', 'gray', 'cyan'), alpha=0.7, markersize=20.0, legend_loc='best') Scatter plot to plot categories in different colors/markerstyles. Parameters x : str or int DataFrame column name of the x-axis values or integer for the numpy ndarray column index. y : str or int DataFrame column name of the y-axis values or integer for the numpy ndarray column index data : Pandas DataFrame object or NumPy ndarray. markers : str Markers that are cycled through the label category. colors : tuple Colors that are cycled through the label category. alpha : float (default: 0.7) Parameter to control the transparency. markersize : float (default: 20.0) Parameter to control the marker size.
legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/category_scatter/","title":"API"},{"location":"user_guide/plotting/checkerboard_plot/","text":"Checkerboard Plot Function to plot a checkerboard plot / heat map via matplotlib. from mlxtend.plotting import checkerboard_plot Overview Function to plot a checkerboard plot / heat map via matplotlib. References - Example 1 - Default from mlxtend.plotting import checkerboard_plot import matplotlib.pyplot as plt import numpy as np ary = np.random.random((5, 4)) brd = checkerboard_plot(ary) plt.show() Example 2 - Changing colors and labels from mlxtend.plotting import checkerboard_plot import matplotlib.pyplot as plt import numpy as np checkerboard_plot(ary, col_labels=['abc', 'def', 'ghi', 'jkl'], row_labels=['sample %d' % i for i in range(1, 6)], cell_colors=['skyblue', 'whitesmoke'], font_colors=['black', 'black'], figsize=(4.5, 5)) plt.show() API checkerboard_plot(ary, cell_colors=('white', 'black'), font_colors=('black', 'white'), fmt='%.1f', figsize=None, row_labels=None, col_labels=None, fontsize=None) Plot a checkerboard table / heatmap via matplotlib. Parameters ary : array-like, shape = [n, m] A 2D NumPy array. cell_colors : tuple or list (default: ('white', 'black')) Tuple or list containing the two colors of the checkerboard pattern. font_colors : tuple or list (default: ('black', 'white')) Font colors corresponding to the cell colors. figsize : tuple (default: (2.5, 2.5)) Height and width of the figure fmt : str (default: '%.1f') Python string formatter for cell values. The default '%.1f' results in floats with 1 digit after the decimal point. Use '%d' to show numbers as integers. row_labels : list (default: None) List of the row labels. Uses the array row indices 0 to n by default. col_labels : list (default: None) List of the column labels. Uses the array column indices 0 to m by default. fontsize : int (default: None) Specifies the font size of the checkerboard table. Uses matplotlib's default if None. Returns fig : matplotlib Figure object.
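As the fmt parameter description above notes, '%d' renders the cell values as integers; a short illustrative sketch (the array here is made up for the example):

from mlxtend.plotting import checkerboard_plot
import matplotlib.pyplot as plt
import numpy as np

counts = np.arange(12).reshape(3, 4)   # integer-valued cells
checkerboard_plot(counts, fmt='%d')    # '%d' formats each cell as an integer
plt.show()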
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/checkerboard_plot/","title":"Checkerboard Plot"},{"location":"user_guide/plotting/checkerboard_plot/#checkerboard-plot","text":"Function to plot a checkerboard plot / heat map via matplotlib. from mlxtend.plotting import checkerboard_plot","title":"Checkerboard Plot"},{"location":"user_guide/plotting/checkerboard_plot/#overview","text":"Function to plot a checkerboard plot / heat map via matplotlib.","title":"Overview"},{"location":"user_guide/plotting/checkerboard_plot/#references","text":"-","title":"References"},{"location":"user_guide/plotting/checkerboard_plot/#example-1-default","text":"from mlxtend.plotting import checkerboard_plot import matplotlib.pyplot as plt import numpy as np ary = np.random.random((5, 4)) brd = checkerboard_plot(ary) plt.show()","title":"Example 1 - Default"},{"location":"user_guide/plotting/checkerboard_plot/#example-2-changing-colors-and-labels","text":"from mlxtend.plotting import checkerboard_plot import matplotlib.pyplot as plt import numpy as np checkerboard_plot(ary, col_labels=['abc', 'def', 'ghi', 'jkl'], row_labels=['sample %d' % i for i in range(1, 6)], cell_colors=['skyblue', 'whitesmoke'], font_colors=['black', 'black'], figsize=(4.5, 5)) plt.show()","title":"Example 2 - Changing colors and labels"},{"location":"user_guide/plotting/checkerboard_plot/#api","text":"checkerboard_plot(ary, cell_colors=('white', 'black'), font_colors=('black', 'white'), fmt='%.1f', figsize=None, row_labels=None, col_labels=None, fontsize=None) Plot a checkerboard table / heatmap via matplotlib. Parameters ary : array-like, shape = [n, m] A 2D NumPy array. cell_colors : tuple or list (default: ('white', 'black')) Tuple or list containing the two colors of the checkerboard pattern. font_colors : tuple or list (default: ('black', 'white')) Font colors corresponding to the cell colors. figsize : tuple (default: (2.5, 2.5)) Height and width of the figure fmt : str (default: '%.1f') Python string formatter for cell values. The default '%.1f' results in floats with 1 digit after the decimal point. Use '%d' to show numbers as integers. row_labels : list (default: None) List of the row labels. Uses the array row indices 0 to n by default. col_labels : list (default: None) List of the column labels. Uses the array column indices 0 to m by default. fontsize : int (default: None) Specifies the font size of the checkerboard table. Uses matplotlib's default if None. Returns fig : matplotlib Figure object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/checkerboard_plot/","title":"API"},{"location":"user_guide/plotting/ecdf/","text":"Empirical Cumulative Distribution Function Plot A function to conveniently plot an empirical cumulative distribution function. from mlxtend.plotting import ecdf Overview A function to conveniently plot an empirical cumulative distribution function (ECDF) and add percentile thresholds for exploratory data analysis.
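Conceptually, an ECDF is just the sorted sample values plotted against their cumulative fraction. A minimal NumPy sketch of the idea (an illustration, not the library's implementation):

import numpy as np

def ecdf_values(x):
    x_sorted = np.sort(x)
    # fraction of samples less than or equal to each sorted value
    frac = np.arange(1, len(x_sorted) + 1) / len(x_sorted)
    return x_sorted, frac

Plotting x_sorted against frac as a marker or step plot reproduces the shape of the ecdf output shown in the examples below.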
References - Example 1 - ECDF from mlxtend.data import iris_data from mlxtend.plotting import ecdf import matplotlib.pyplot as plt X, y = iris_data() ax, _, _ = ecdf(x=X[:, 0], x_label='sepal length (cm)') plt.show() Example 2 - Multiple ECDFs from mlxtend.data import iris_data from mlxtend.plotting import ecdf import matplotlib.pyplot as plt X, y = iris_data() # first ecdf x1 = X[:, 0] ax, _, _ = ecdf(x1, x_label='cm') # second ecdf x2 = X[:, 1] ax, _, _ = ecdf(x2, ax=ax) plt.legend(['sepal length', 'sepal width']) plt.show() Example 3 - ECDF with Percentile Thresholds from mlxtend.data import iris_data from mlxtend.plotting import ecdf import matplotlib.pyplot as plt X, y = iris_data() ax, threshold, count = ecdf(x=X[:, 0], x_label='sepal length (cm)', percentile=0.8) plt.show() print('Feature threshold at the 80th percentile:', threshold) print('Number of samples below the threshold:', count) Feature threshold at the 80th percentile: 6.5 Number of samples below the threshold: 120 API ecdf(x, y_label='ECDF', x_label=None, ax=None, percentile=None, ecdf_color=None, ecdf_marker='o', percentile_color='black', percentile_linestyle='--') Plots an Empirical Cumulative Distribution Function Parameters x : array or list, shape=[n_samples,] Array-like object containing the feature values y_label : str (default='ECDF') Text label for the y-axis x_label : str (default=None) Text label for the x-axis ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None percentile : float (default=None) Float between 0 and 1 for plotting a percentile threshold line ecdf_color : matplotlib color (default=None) Color for the ECDF plot; uses matplotlib defaults if None ecdf_marker : matplotlib marker (default='o') Marker style for the ECDF plot percentile_color : matplotlib color (default='black') Color for the percentile threshold if percentile is not None percentile_linestyle : matplotlib linestyle (default='--') Line style for the percentile threshold if percentile is not None Returns ax : matplotlib.axes.Axes object percentile_threshold : float Feature threshold at the percentile or None if percentile=None percentile_count : int or None Number of samples with a feature value less than or equal to the feature threshold at the given percentile, or None if percentile=None Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/ecdf/","title":"Empirical Cumulative Distribution Function Plot"},{"location":"user_guide/plotting/ecdf/#empirical-cumulative-distribution-function-plot","text":"A function to conveniently plot an empirical cumulative distribution function.
from mlxtend.plotting import ecdf","title":"Empirical Cumulative Distribution Function Plot"},{"location":"user_guide/plotting/ecdf/#overview","text":"A function to conveniently plot an empirical cumulative distribution function (ECDF) and add percentile thresholds for exploratory data analysis.","title":"Overview"},{"location":"user_guide/plotting/ecdf/#references","text":"-","title":"References"},{"location":"user_guide/plotting/ecdf/#example-1-ecdf","text":"from mlxtend.data import iris_data from mlxtend.plotting import ecdf import matplotlib.pyplot as plt X, y = iris_data() ax, _, _ = ecdf(x=X[:, 0], x_label='sepal length (cm)') plt.show()","title":"Example 1 - ECDF"},{"location":"user_guide/plotting/ecdf/#example-2-multiple-ecdfs","text":"from mlxtend.data import iris_data from mlxtend.plotting import ecdf import matplotlib.pyplot as plt X, y = iris_data() # first ecdf x1 = X[:, 0] ax, _, _ = ecdf(x1, x_label='cm') # second ecdf x2 = X[:, 1] ax, _, _ = ecdf(x2, ax=ax) plt.legend(['sepal length', 'sepal width']) plt.show()","title":"Example 2 - Multiple ECDFs"},{"location":"user_guide/plotting/ecdf/#example-3-ecdf-with-percentile-thresholds","text":"from mlxtend.data import iris_data from mlxtend.plotting import ecdf import matplotlib.pyplot as plt X, y = iris_data() ax, threshold, count = ecdf(x=X[:, 0], x_label='sepal length (cm)', percentile=0.8) plt.show() print('Feature threshold at the 80th percentile:', threshold) print('Number of samples below the threshold:', count) Feature threshold at the 80th percentile: 6.5 Number of samples below the threshold: 120","title":"Example 3 - ECDF with Percentile Thresholds"},{"location":"user_guide/plotting/ecdf/#api","text":"ecdf(x, y_label='ECDF', x_label=None, ax=None, percentile=None, ecdf_color=None, ecdf_marker='o', percentile_color='black', percentile_linestyle='--') Plots an Empirical Cumulative Distribution Function Parameters x : array or list, shape=[n_samples,] Array-like object containing the feature values y_label : str (default='ECDF') Text label for the y-axis x_label : str (default=None) Text label for the x-axis ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None percentile : float (default=None) Float between 0 and 1 for plotting a percentile threshold line ecdf_color : matplotlib color (default=None) Color for the ECDF plot; uses matplotlib defaults if None ecdf_marker : matplotlib marker (default='o') Marker style for the ECDF plot percentile_color : matplotlib color (default='black') Color for the percentile threshold if percentile is not None percentile_linestyle : matplotlib linestyle (default='--') Line style for the percentile threshold if percentile is not None Returns ax : matplotlib.axes.Axes object percentile_threshold : float Feature threshold at the percentile or None if percentile=None percentile_count : int or None Number of samples with a feature value less than or equal to the feature threshold at the given percentile, or None if percentile=None Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/ecdf/","title":"API"},{"location":"user_guide/plotting/enrichment_plot/","text":"Enrichment Plot A function to plot step plots of cumulative counts.
from mlxtend.plotting import enrichment_plot Overview In enrichment plots, the y-axis can be interpreted as \"how many samples are less than or equal to the corresponding x-axis label.\" References - Example 1 - Enrichment Plots from Pandas DataFrames import pandas as pd s1 = [1.1, 1.5] s2 = [2.1, 1.8] s3 = [3.1, 2.1] s4 = [3.9, 2.5] data = [s1, s2, s3, s4] df = pd.DataFrame(data, columns=['X1', 'X2']) df X1 X2 0 1.1 1.5 1 2.1 1.8 2 3.1 2.1 3 3.9 2.5 Plotting the cumulative counts, where each column of the DataFrame is drawn as its own step curve: import matplotlib.pyplot as plt from mlxtend.plotting import enrichment_plot ax = enrichment_plot(df, legend_loc='upper left') API enrichment_plot(df, colors='bgrkcy', markers=' ', linestyles='-', alpha=0.5, lw=2, where='post', grid=True, count_label='Count', xlim='auto', ylim='auto', invert_axes=False, legend_loc='best', ax=None) Plot an enrichment plot (a step plot of cumulative counts) Parameters df : pandas.DataFrame A pandas DataFrame where columns represent the different categories. colors: str (default: 'bgrkcy') The colors of the curves. markers : str (default: ' ') Matplotlib markerstyles, e.g., 'sov' for square, circle, and triangle markers. linestyles : str (default: '-') Matplotlib linestyles, e.g., '-,--' to cycle normal and dashed lines. Note that the different linestyles need to be separated by commas. alpha : float (default: 0.5) Transparency level from 0.0 to 1.0. lw : int or float (default: 2) Linewidth parameter. where : {'post', 'pre', 'mid'} (default: 'post') Starting location of the steps. grid : bool (default: True ) Plots a grid if True. count_label : str (default: 'Count') Label for the \"Count\"-axis. xlim : 'auto' or array-like [min, max] (default: 'auto') Min and maximum position of the x-axis range. ylim : 'auto' or array-like [min, max] (default: 'auto') Min and maximum position of the y-axis range. invert_axes : bool (default: False) Plots count on the x-axis if True. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False ax : matplotlib axis, optional (default: None) Use this axis for plotting or make a new one otherwise Returns ax : matplotlib axis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/enrichment_plot/","title":"Enrichment Plot"},{"location":"user_guide/plotting/enrichment_plot/#enrichment-plot","text":"A function to plot step plots of cumulative counts. from mlxtend.plotting import enrichment_plot","title":"Enrichment Plot"},{"location":"user_guide/plotting/enrichment_plot/#overview","text":"In enrichment plots, the y-axis can be interpreted as \"how many samples are less than or equal to the corresponding x-axis label.\"","title":"Overview"},{"location":"user_guide/plotting/enrichment_plot/#references","text":"-","title":"References"},{"location":"user_guide/plotting/enrichment_plot/#example-1-enrichment-plots-from-pandas-dataframes","text":"import pandas as pd s1 = [1.1, 1.5] s2 = [2.1, 1.8] s3 = [3.1, 2.1] s4 = [3.9, 2.5] data = [s1, s2, s3, s4] df = pd.DataFrame(data, columns=['X1', 'X2']) df X1 X2 0 1.1 1.5 1 2.1 1.8 2 3.1 2.1 3 3.9 2.5 Plotting the cumulative counts, where each column of the DataFrame is drawn as its own step curve:
import matplotlib.pyplot as plt from mlxtend.plotting import enrichment_plot ax = enrichment_plot(df, legend_loc='upper left')","title":"Example 1 - Enrichment Plots from Pandas DataFrames"},{"location":"user_guide/plotting/enrichment_plot/#api","text":"enrichment_plot(df, colors='bgrkcy', markers=' ', linestyles='-', alpha=0.5, lw=2, where='post', grid=True, count_label='Count', xlim='auto', ylim='auto', invert_axes=False, legend_loc='best', ax=None) Plot enrichment plots (step plots of cumulative counts) Parameters df : pandas.DataFrame A pandas DataFrame where columns represent the different categories. colors: str (default: 'bgrkcy') The colors of the step plots. markers : str (default: ' ') Matplotlib markerstyles, e.g., 'sov' for square, circle, and triangle markers. linestyles : str (default: '-') Matplotlib linestyles, e.g., '-,--' to cycle normal and dashed lines. Note that the different linestyles need to be separated by commas. alpha : float (default: 0.5) Transparency level from 0.0 to 1.0. lw : int or float (default: 2) Linewidth parameter. where : {'post', 'pre', 'mid'} (default: 'post') Starting location of the steps. grid : bool (default: True) Plots a grid if True. count_label : str (default: 'Count') Label for the \"Count\"-axis. xlim : 'auto' or array-like [min, max] (default: 'auto') Min and maximum position of the x-axis range. ylim : 'auto' or array-like [min, max] (default: 'auto') Min and maximum position of the y-axis range. invert_axes : bool (default: False) Plots count on the x-axis if True. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False ax : matplotlib axis, optional (default: None) Use this axis for plotting or make a new one otherwise Returns ax : matplotlib axis Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/enrichment_plot/","title":"API"},{"location":"user_guide/plotting/plot_confusion_matrix/","text":"Confusion Matrix Utility function for visualizing confusion matrices via matplotlib from mlxtend.plotting import plot_confusion_matrix Overview Confusion Matrix For more information on confusion matrices, please see mlxtend.evaluate.confusion_matrix . References - Example 1 - Binary from mlxtend.plotting import plot_confusion_matrix import matplotlib.pyplot as plt import numpy as np binary = np.array([[4, 1], [1, 2]]) fig, ax = plot_confusion_matrix(conf_mat=binary) plt.show() Example 2 - Binary absolute and relative with colorbar binary = np.array([[4, 1], [1, 2]]) fig, ax = plot_confusion_matrix(conf_mat=binary, show_absolute=True, show_normed=True, colorbar=True) plt.show() Example 3 - Multiclass relative multiclass = np.array([[2, 1, 0, 0], [1, 2, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) fig, ax = plot_confusion_matrix(conf_mat=multiclass, colorbar=True, show_absolute=False, show_normed=True) plt.show() API plot_confusion_matrix(conf_mat, hide_spines=False, hide_ticks=False, figsize=None, cmap=None, colorbar=False, show_absolute=True, show_normed=False) Plot a confusion matrix via matplotlib. Parameters conf_mat : array-like, shape = [n_classes, n_classes] Confusion matrix from evaluate.confusion_matrix . hide_spines : bool (default: False) Hides axis spines if True.
hide_ticks : bool (default: False) Hides axis ticks if True figsize : tuple (default: (2.5, 2.5)) Height and width of the figure cmap : matplotlib colormap (default: None ) Uses matplotlib.pyplot.cm.Blues if None colorbar : bool (default: False) Shows a colorbar if True show_absolute : bool (default: True) Shows absolute confusion matrix coefficients if True. At least one of show_absolute or show_normed must be True. show_normed : bool (default: False) Shows normed confusion matrix coefficients if True. The normed confusion matrix coefficients give the proportion of training examples per class that are assigned the correct label. At least one of show_absolute or show_normed must be True. Returns fig, ax : matplotlib.pyplot subplot objects Figure and axis elements of the subplot. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_confusion_matrix/","title":"Confusion Matrix"},{"location":"user_guide/plotting/plot_confusion_matrix/#confusion-matrix","text":"Utility function for visualizing confusion matrices via matplotlib from mlxtend.plotting import plot_confusion_matrix","title":"Confusion Matrix"},{"location":"user_guide/plotting/plot_confusion_matrix/#overview","text":"","title":"Overview"},{"location":"user_guide/plotting/plot_confusion_matrix/#confusion-matrix_1","text":"For more information on confusion matrices, please see mlxtend.evaluate.confusion_matrix .","title":"Confusion Matrix"},{"location":"user_guide/plotting/plot_confusion_matrix/#references","text":"-","title":"References"},{"location":"user_guide/plotting/plot_confusion_matrix/#example-1-binary","text":"from mlxtend.plotting import plot_confusion_matrix import matplotlib.pyplot as plt import numpy as np binary = np.array([[4, 1], [1, 2]]) fig, ax = plot_confusion_matrix(conf_mat=binary) plt.show()","title":"Example 1 - Binary"},{"location":"user_guide/plotting/plot_confusion_matrix/#example-2-binary-absolute-and-relative-with-colorbar","text":"binary = np.array([[4, 1], [1, 2]]) fig, ax = plot_confusion_matrix(conf_mat=binary, show_absolute=True, show_normed=True, colorbar=True) plt.show()","title":"Example 2 - Binary absolute and relative with colorbar"},{"location":"user_guide/plotting/plot_confusion_matrix/#example-3-multiclass-relative","text":"multiclass = np.array([[2, 1, 0, 0], [1, 2, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) fig, ax = plot_confusion_matrix(conf_mat=multiclass, colorbar=True, show_absolute=False, show_normed=True) plt.show()","title":"Example 3 - Multiclass relative"},{"location":"user_guide/plotting/plot_confusion_matrix/#api","text":"plot_confusion_matrix(conf_mat, hide_spines=False, hide_ticks=False, figsize=None, cmap=None, colorbar=False, show_absolute=True, show_normed=False) Plot a confusion matrix via matplotlib. Parameters conf_mat : array-like, shape = [n_classes, n_classes] Confusion matrix from evaluate.confusion matrix. hide_spines : bool (default: False) Hides axis spines if True. hide_ticks : bool (default: False) Hides axis ticks if True figsize : tuple (default: (2.5, 2.5)) Height and width of the figure cmap : matplotlib colormap (default: None ) Uses matplotlib.pyplot.cm.Blues if None colorbar : bool (default: False) Shows a colorbar if True show_absolute : bool (default: True) Shows absolute confusion matrix coefficients if True. At least one of show_absolute or show_normed must be True. show_normed : bool (default: False) Shows normed confusion matrix coefficients if True. 
The normed confusion matrix coefficients give the proportion of training examples per class that are assigned the correct label. At least one of show_absolute or show_normed must be True. Returns fig, ax : matplotlib.pyplot subplot objects Figure and axis elements of the subplot. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_confusion_matrix/","title":"API"},{"location":"user_guide/plotting/plot_decision_regions/","text":"Plotting Decision Regions A function for plotting decision regions of classifiers in 1 or 2 dimensions. from mlxtend.plotting import plot_decision_regions References Example 1 - Decision regions in 2D from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X = iris.data[:, [0, 2]] y = iris.target # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X, y) # Plotting decision regions plot_decision_regions(X, y, clf=svm, legend=2) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.ylabel('petal length [cm]') plt.title('SVM on Iris') plt.show() Example 2 - Decision regions in 1D from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X = iris.data[:, 2] X = X[:, None] y = iris.target # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X, y) # Plotting decision regions plot_decision_regions(X, y, clf=svm, legend=2) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.title('SVM on Iris') plt.show() Example 3 - Decision Region Grids from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn import datasets import numpy as np # Initializing Classifiers clf1 = LogisticRegression(random_state=1, solver='newton-cg', multi_class='multinomial') clf2 = RandomForestClassifier(random_state=1, n_estimators=100) clf3 = GaussianNB() clf4 = SVC(gamma='auto') # Loading some example data iris = datasets.load_iris() X = iris.data[:, [0,2]] y = iris.target import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions import matplotlib.gridspec as gridspec import itertools gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM'] for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show() Example 4 - Highlighting Test Data Points from mlxtend.plotting import plot_decision_regions from mlxtend.preprocessing import shuffle_arrays_unison import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X, y = iris.data[:, [0,2]], iris.target X, y = shuffle_arrays_unison(arrays=[X, y], random_seed=3) X_train, y_train = X[:100], y[:100] X_test, y_test = X[100:], y[100:] # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X_train, y_train) # Plotting decision regions plot_decision_regions(X, y, clf=svm, legend=2, X_highlight=X_test) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.ylabel('petal length [cm]') plt.title('SVM on Iris') 
plt.show() Example 5 - Evaluating Classifier Behavior on Non-Linear Problems from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC # Initializing Classifiers clf1 = LogisticRegression(random_state=1, solver='lbfgs') clf2 = RandomForestClassifier(n_estimators=100, random_state=1) clf3 = GaussianNB() clf4 = SVC(gamma='auto') # Loading Plotting Utilities import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import itertools from mlxtend.plotting import plot_decision_regions import numpy as np XOR xx, yy = np.meshgrid(np.linspace(-3, 3, 50), np.linspace(-3, 3, 50)) rng = np.random.RandomState(0) X = rng.randn(300, 2) y = np.array(np.logical_xor(X[:, 0] > 0, X[:, 1] > 0), dtype=int) gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM'] for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show() Half-Moons from sklearn.datasets import make_moons X, y = make_moons(n_samples=100, random_state=123) gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM'] for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show() Concentric Circles from sklearn.datasets import make_circles X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2) gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM'] for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show() Example 6 - Working with existing axes objects using subplots import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn import datasets import numpy as np # Loading some example data iris = datasets.load_iris() X = iris.data[:, 2] X = X[:, None] y = iris.target # Initializing and fitting classifiers clf1 = LogisticRegression(random_state=1, solver='lbfgs', multi_class='multinomial') clf2 = GaussianNB() clf1.fit(X, y) clf2.fit(X, y) fig, axes = plt.subplots(1, 2, figsize=(10, 3)) fig = plot_decision_regions(X=X, y=y, clf=clf1, ax=axes[0], legend=2) fig = plot_decision_regions(X=X, y=y, clf=clf2, ax=axes[1], legend=1) plt.show() Example 7 - Decision regions with more than two training features from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data X, y = datasets.make_blobs(n_samples=600, n_features=3, centers=[[2, 2, -2],[-2, -2, 2]], cluster_std=[2, 2], random_state=2) # Training a classifier svm = SVC(gamma='auto') svm.fit(X, y) # Plotting decision regions fig, ax = plt.subplots() # Decision region for feature 3 = 1.5 value = 1.5 # Plot training sample with feature 3 = 1.5 +/- 0.75 width = 0.75 plot_decision_regions(X, y, 
clf=svm, filler_feature_values={2: value}, filler_feature_ranges={2: width}, legend=2, ax=ax) ax.set_xlabel('Feature 1') ax.set_ylabel('Feature 2') ax.set_title('Feature 3 = {}'.format(value)) # Adding axes annotations fig.suptitle('SVM on make_blobs') plt.show() Example 8 - Grid of decision region slices from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data X, y = datasets.make_blobs(n_samples=500, n_features=3, centers=[[2, 2, -2],[-2, -2, 2]], cluster_std=[2, 2], random_state=2) # Training a classifier svm = SVC(gamma='auto') svm.fit(X, y) # Plotting decision regions fig, axarr = plt.subplots(2, 2, figsize=(10,8), sharex=True, sharey=True) values = [-4.0, -1.0, 1.0, 4.0] width = 0.75 for value, ax in zip(values, axarr.flat): plot_decision_regions(X, y, clf=svm, filler_feature_values={2: value}, filler_feature_ranges={2: width}, legend=2, ax=ax) ax.set_xlabel('Feature 1') ax.set_ylabel('Feature 2') ax.set_title('Feature 3 = {}'.format(value)) # Adding axes annotations fig.suptitle('SVM on make_blobs') plt.show() Example 9 - Customizing the plotting style from mlxtend.plotting import plot_decision_regions from mlxtend.preprocessing import shuffle_arrays_unison import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X = iris.data[:, [0, 2]] y = iris.target X, y = shuffle_arrays_unison(arrays=[X, y], random_seed=3) X_train, y_train = X[:100], y[:100] X_test, y_test = X[100:], y[100:] # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X_train, y_train) # Specify keyword arguments to be passed to underlying plotting functions scatter_kwargs = {'s': 120, 'edgecolor': None, 'alpha': 0.7} contourf_kwargs = {'alpha': 0.2} scatter_highlight_kwargs = {'s': 120, 'label': 'Test data', 'alpha': 0.7} # Plotting decision regions plot_decision_regions(X, y, clf=svm, legend=2, X_highlight=X_test, scatter_kwargs=scatter_kwargs, contourf_kwargs=contourf_kwargs, scatter_highlight_kwargs=scatter_highlight_kwargs) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.ylabel('petal length [cm]') plt.title('SVM on Iris') plt.show() Example 10 - Providing your own legend labels Custom legend labels can be provided by using the axis object returned by the plot_decision_regions function and then getting the handles and labels of the legend. Custom labels can then be provided via ax.legend ax = plot_decision_regions(X, y, clf=svm, legend=0) handles, labels = ax.get_legend_handles_labels() ax.legend(handles, ['class 0', 'class 1', 'class 2'], framealpha=0.3, scatterpoints=1) An example is shown below.
from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X = iris.data[:, [0, 2]] y = iris.target # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X, y) # Plotting decision regions ax = plot_decision_regions(X, y, clf=svm, legend=0) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.ylabel('petal length [cm]') plt.title('SVM on Iris') handles, labels = ax.get_legend_handles_labels() ax.legend(handles, ['class square', 'class triangle', 'class circle'], framealpha=0.3, scatterpoints=1) plt.show() API plot_decision_regions(X, y, clf, feature_index=None, filler_feature_values=None, filler_feature_ranges=None, ax=None, X_highlight=None, res=None, legend=1, hide_spines=True, markers='s^oxv<>', colors='#1f77b4,#ff7f0e,#3ca02c,#d62728,#9467bd,#8c564b,#e377c2,#7f7f7f,#bcbd22,#17becf', scatter_kwargs=None, contourf_kwargs=None, scatter_highlight_kwargs=None) Plot decision regions of a classifier. Please note that this function assumes that class labels are labeled consecutively, e.g., 0, 1, 2, 3, 4, and 5. If you have class labels with integer labels > 4, you may want to provide additional colors and/or markers as colors and markers arguments. See http://matplotlib.org/examples/color/named_colors.html for more information. Parameters X : array-like, shape = [n_samples, n_features] Feature Matrix. y : array-like, shape = [n_samples] True class labels. clf : Classifier object. Must have a .predict method. feature_index : array-like (default: (0,) for 1D, (0, 1) otherwise) Feature indices to use for plotting. The first index in feature_index will be on the x-axis, the second index will be on the y-axis. filler_feature_values : dict (default: None) Only needed for number of features > 2. Dictionary of feature index-value pairs for the features not being plotted. filler_feature_ranges : dict (default: None) Only needed for number of features > 2. Dictionary of feature index-value pairs for the features not being plotted. Will use the ranges provided to select training samples for plotting. ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None. X_highlight : array-like, shape = [n_samples, n_features] (default: None) An array with data points that are used to highlight samples in X . res : float or array-like, shape = (2,) (default: None) This parameter was used to define the grid width, but it has been deprecated in favor of determining the number of points given the figure DPI and size automatically for optimal results and computational efficiency. To increase the resolution, it is recommended to provide a dpi argument via matplotlib, e.g., plt.figure(dpi=600). hide_spines : bool (default: True) Hide axis spines if True. legend : int (default: 1) Integer to specify the legend location. No legend if legend is 0. markers : str (default: 's^oxv<>') Scatterplot markers. colors : str (default: '#1f77b4,#ff7f0e,#3ca02c,#d62728,#9467bd,#8c564b,#e377c2,#7f7f7f,#bcbd22,#17becf') Comma separated list of colors. scatter_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function. contourf_kwargs : dict (default: None) Keyword arguments for underlying matplotlib contourf function. scatter_highlight_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function.
Returns ax : matplotlib.axes.Axes object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_decision_regions/","title":"Plotting Decision Regions"},{"location":"user_guide/plotting/plot_decision_regions/#plotting-decision-regions","text":"A function for plotting decision regions of classifiers in 1 or 2 dimensions. from mlxtend.plotting import plot_decision_regions","title":"Plotting Decision Regions"},{"location":"user_guide/plotting/plot_decision_regions/#references","text":"","title":"References"},{"location":"user_guide/plotting/plot_decision_regions/#example-1-decision-regions-in-2d","text":"from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X = iris.data[:, [0, 2]] y = iris.target # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X, y) # Plotting decision regions plot_decision_regions(X, y, clf=svm, legend=2) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.ylabel('petal length [cm]') plt.title('SVM on Iris') plt.show()","title":"Example 1 - Decision regions in 2D"},{"location":"user_guide/plotting/plot_decision_regions/#example-2-decision-regions-in-1d","text":"from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X = iris.data[:, 2] X = X[:, None] y = iris.target # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X, y) # Plotting decision regions plot_decision_regions(X, y, clf=svm, legend=2) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.title('SVM on Iris') plt.show()","title":"Example 2 - Decision regions in 1D"},{"location":"user_guide/plotting/plot_decision_regions/#example-3-decision-region-grids","text":"from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn import datasets import numpy as np # Initializing Classifiers clf1 = LogisticRegression(random_state=1, solver='newton-cg', multi_class='multinomial') clf2 = RandomForestClassifier(random_state=1, n_estimators=100) clf3 = GaussianNB() clf4 = SVC(gamma='auto') # Loading some example data iris = datasets.load_iris() X = iris.data[:, [0,2]] y = iris.target import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions import matplotlib.gridspec as gridspec import itertools gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM'] for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show()","title":"Example 3 - Decision Region Grids"},{"location":"user_guide/plotting/plot_decision_regions/#example-4-highlighting-test-data-points","text":"from mlxtend.plotting import plot_decision_regions from mlxtend.preprocessing import shuffle_arrays_unison import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X, y = iris.data[:, [0,2]], iris.target X, y = shuffle_arrays_unison(arrays=[X, y], random_seed=3) X_train, y_train = X[:100], y[:100] X_test, y_test = X[100:], 
y[100:] # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X_train, y_train) # Plotting decision regions plot_decision_regions(X, y, clf=svm, legend=2, X_highlight=X_test) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.ylabel('petal length [cm]') plt.title('SVM on Iris') plt.show()","title":"Example 4 - Highlighting Test Data Points"},{"location":"user_guide/plotting/plot_decision_regions/#example-5-evaluating-classifier-behavior-on-non-linear-problems","text":"from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC # Initializing Classifiers clf1 = LogisticRegression(random_state=1, solver='lbfgs') clf2 = RandomForestClassifier(n_estimators=100, random_state=1) clf3 = GaussianNB() clf4 = SVC(gamma='auto') # Loading Plotting Utilities import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import itertools from mlxtend.plotting import plot_decision_regions import numpy as np","title":"Example 5 - Evaluating Classifier Behavior on Non-Linear Problems"},{"location":"user_guide/plotting/plot_decision_regions/#xor","text":"xx, yy = np.meshgrid(np.linspace(-3, 3, 50), np.linspace(-3, 3, 50)) rng = np.random.RandomState(0) X = rng.randn(300, 2) y = np.array(np.logical_xor(X[:, 0] > 0, X[:, 1] > 0), dtype=int) gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM'] for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show()","title":"XOR"},{"location":"user_guide/plotting/plot_decision_regions/#half-moons","text":"from sklearn.datasets import make_moons X, y = make_moons(n_samples=100, random_state=123) gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM'] for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show()","title":"Half-Moons"},{"location":"user_guide/plotting/plot_decision_regions/#concentric-circles","text":"from sklearn.datasets import make_circles X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2) gs = gridspec.GridSpec(2, 2) fig = plt.figure(figsize=(10,8)) labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM'] for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels, itertools.product([0, 1], repeat=2)): clf.fit(X, y) ax = plt.subplot(gs[grd[0], grd[1]]) fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2) plt.title(lab) plt.show()","title":"Concentric Circles"},{"location":"user_guide/plotting/plot_decision_regions/#example-6-working-with-existing-axes-objects-using-subplots","text":"import matplotlib.pyplot as plt from mlxtend.plotting import plot_decision_regions from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn import datasets import numpy as np # Loading some example data iris = datasets.load_iris() X = iris.data[:, 2] X = X[:, None] y = iris.target # Initializing and fitting classifiers clf1 = LogisticRegression(random_state=1, solver='lbfgs', multi_class='multinomial') clf2 = GaussianNB() clf1.fit(X, y) clf2.fit(X, y) 
fig, axes = plt.subplots(1, 2, figsize=(10, 3)) fig = plot_decision_regions(X=X, y=y, clf=clf1, ax=axes[0], legend=2) fig = plot_decision_regions(X=X, y=y, clf=clf2, ax=axes[1], legend=1) plt.show()","title":"Example 6 - Working with existing axes objects using subplots"},{"location":"user_guide/plotting/plot_decision_regions/#example-7-decision-regions-with-more-than-two-training-features","text":"from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data X, y = datasets.make_blobs(n_samples=600, n_features=3, centers=[[2, 2, -2],[-2, -2, 2]], cluster_std=[2, 2], random_state=2) # Training a classifier svm = SVC(gamma='auto') svm.fit(X, y) # Plotting decision regions fig, ax = plt.subplots() # Decision region for feature 3 = 1.5 value = 1.5 # Plot training sample with feature 3 = 1.5 +/- 0.75 width = 0.75 plot_decision_regions(X, y, clf=svm, filler_feature_values={2: value}, filler_feature_ranges={2: width}, legend=2, ax=ax) ax.set_xlabel('Feature 1') ax.set_ylabel('Feature 2') ax.set_title('Feature 3 = {}'.format(value)) # Adding axes annotations fig.suptitle('SVM on make_blobs') plt.show()","title":"Example 7 - Decision regions with more than two training features"},{"location":"user_guide/plotting/plot_decision_regions/#example-8-grid-of-decision-region-slices","text":"from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data X, y = datasets.make_blobs(n_samples=500, n_features=3, centers=[[2, 2, -2],[-2, -2, 2]], cluster_std=[2, 2], random_state=2) # Training a classifier svm = SVC(gamma='auto') svm.fit(X, y) # Plotting decision regions fig, axarr = plt.subplots(2, 2, figsize=(10,8), sharex=True, sharey=True) values = [-4.0, -1.0, 1.0, 4.0] width = 0.75 for value, ax in zip(values, axarr.flat): plot_decision_regions(X, y, clf=svm, filler_feature_values={2: value}, filler_feature_ranges={2: width}, legend=2, ax=ax) ax.set_xlabel('Feature 1') ax.set_ylabel('Feature 2') ax.set_title('Feature 3 = {}'.format(value)) # Adding axes annotations fig.suptitle('SVM on make_blobs') plt.show()","title":"Example 8 - Grid of decision region slices"},{"location":"user_guide/plotting/plot_decision_regions/#example-9-customizing-the-plotting-style","text":"from mlxtend.plotting import plot_decision_regions from mlxtend.preprocessing import shuffle_arrays_unison import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X = iris.data[:, [0, 2]] y = iris.target X, y = shuffle_arrays_unison(arrays=[X, y], random_seed=3) X_train, y_train = X[:100], y[:100] X_test, y_test = X[100:], y[100:] # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X_train, y_train) # Specify keyword arguments to be passed to underlying plotting functions scatter_kwargs = {'s': 120, 'edgecolor': None, 'alpha': 0.7} contourf_kwargs = {'alpha': 0.2} scatter_highlight_kwargs = {'s': 120, 'label': 'Test data', 'alpha': 0.7} # Plotting decision regions plot_decision_regions(X, y, clf=svm, legend=2, X_highlight=X_test, scatter_kwargs=scatter_kwargs, contourf_kwargs=contourf_kwargs, scatter_highlight_kwargs=scatter_highlight_kwargs) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.ylabel('petal length [cm]') plt.title('SVM on Iris') plt.show()","title":"Example 9 - Customizing the plotting 
style"},{"location":"user_guide/plotting/plot_decision_regions/#example-10-providing-your-own-legend-labels","text":"Custom legend labels can be provided by returning the axis object(s) from the plot_decision_region function and then getting the handles and labels of the legend. Custom handles (i.e., labels) can then be provided via ax.legend ax = plot_decision_regions(X, y, clf=svm, legend=0) handles, labels = ax.get_legend_handles_labels() ax.legend(handles, ['class 0', 'class 1', 'class 2'], framealpha=0.3, scatterpoints=1) An example is shown below. from mlxtend.plotting import plot_decision_regions import matplotlib.pyplot as plt from sklearn import datasets from sklearn.svm import SVC # Loading some example data iris = datasets.load_iris() X = iris.data[:, [0, 2]] y = iris.target # Training a classifier svm = SVC(C=0.5, kernel='linear') svm.fit(X, y) # Plotting decision regions ax = plot_decision_regions(X, y, clf=svm, legend=0) # Adding axes annotations plt.xlabel('sepal length [cm]') plt.ylabel('petal length [cm]') plt.title('SVM on Iris') handles, labels = ax.get_legend_handles_labels() ax.legend(handles, ['class square', 'class triangle', 'class circle'], framealpha=0.3, scatterpoints=1) plt.show()","title":"Example 10 - Providing your own legend labels"},{"location":"user_guide/plotting/plot_decision_regions/#api","text":"plot_decision_regions(X, y, clf, feature_index=None, filler_feature_values=None, filler_feature_ranges=None, ax=None, X_highlight=None, res=None, legend=1, hide_spines=True, markers='s^oxv<>', colors='#1f77b4,#ff7f0e,#3ca02c,#d62728,#9467bd,#8c564b,#e377c2,#7f7f7f,#bcbd22,#17becf', scatter_kwargs=None, contourf_kwargs=None, scatter_highlight_kwargs=None) Plot decision regions of a classifier. Please note that this functions assumes that class labels are labeled consecutively, e.g,. 0, 1, 2, 3, 4, and 5. If you have class labels with integer labels > 4, you may want to provide additional colors and/or markers as colors and markers arguments. See http://matplotlib.org/examples/color/named_colors.html for more information. Parameters X : array-like, shape = [n_samples, n_features] Feature Matrix. y : array-like, shape = [n_samples] True class labels. clf : Classifier object. Must have a .predict method. feature_index : array-like (default: (0,) for 1D, (0, 1) otherwise) Feature indices to use for plotting. The first index in feature_index will be on the x-axis, the second index will be on the y-axis. filler_feature_values : dict (default: None) Only needed for number features > 2. Dictionary of feature index-value pairs for the features not being plotted. filler_feature_ranges : dict (default: None) Only needed for number features > 2. Dictionary of feature index-value pairs for the features not being plotted. Will use the ranges provided to select training samples for plotting. ax : matplotlib.axes.Axes (default: None) An existing matplotlib Axes. Creates one if ax=None. X_highlight : array-like, shape = [n_samples, n_features] (default: None) An array with data points that are used to highlight samples in X . res : float or array-like, shape = (2,) (default: None) This parameter was used to define the grid width, but it has been deprecated in favor of determining the number of points given the figure DPI and size automatically for optimal results and computational efficiency. To increase the resolution, it's is recommended to use to provide a dpi argument via matplotlib, e.g., plt.figure(dpi=600)`. hide_spines : bool (default: True) Hide axis spines if True. 
legend : int (default: 1) Integer to specify the legend location. No legend if legend is 0. markers : str (default: 's^oxv<>') Scatterplot markers. colors : str (default: '#1f77b4,#ff7f0e,#3ca02c,#d62728,#9467bd,#8c564b,#e377c2,#7f7f7f,#bcbd22,#17becf') Comma separated list of colors. scatter_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function. contourf_kwargs : dict (default: None) Keyword arguments for underlying matplotlib contourf function. scatter_highlight_kwargs : dict (default: None) Keyword arguments for underlying matplotlib scatter function. Returns ax : matplotlib.axes.Axes object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_decision_regions/","title":"API"},{"location":"user_guide/plotting/plot_learning_curves/","text":"Plotting Learning Curves A function to plot learning curves for classifiers. Learning curves are extremely useful for analyzing whether a model is suffering from over- or underfitting (high variance or high bias). The function can be imported via from mlxtend.plotting import plot_learning_curves References - Example 1 from mlxtend.plotting import plot_learning_curves import matplotlib.pyplot as plt from mlxtend.data import iris_data from mlxtend.preprocessing import shuffle_arrays_unison from sklearn.neighbors import KNeighborsClassifier import numpy as np # Loading some example data X, y = iris_data() X, y = shuffle_arrays_unison(arrays=[X, y], random_seed=123) X_train, X_test = X[:100], X[100:] y_train, y_test = y[:100], y[100:] clf = KNeighborsClassifier(n_neighbors=5) plot_learning_curves(X_train, y_train, X_test, y_test, clf) plt.show() API plot_learning_curves(X_train, y_train, X_test, y_test, clf, train_marker='o', test_marker='^', scoring='misclassification error', suppress_plot=False, print_model=True, style='fivethirtyeight', legend_loc='best') Plots learning curves of a classifier. Parameters X_train : array-like, shape = [n_samples, n_features] Feature matrix of the training dataset. y_train : array-like, shape = [n_samples] True class labels of the training dataset. X_test : array-like, shape = [n_samples, n_features] Feature matrix of the test dataset. y_test : array-like, shape = [n_samples] True class labels of the test dataset. clf : Classifier object. Must have .fit and .predict methods. train_marker : str (default: 'o') Marker for the training set line plot. test_marker : str (default: '^') Marker for the test set line plot. scoring : str (default: 'misclassification error') If not 'misclassification error', accepts the following metrics (from scikit-learn): {'accuracy', 'average_precision', 'f1_micro', 'f1_macro', 'f1_weighted', 'f1_samples', 'log_loss', 'precision', 'recall', 'roc_auc', 'adjusted_rand_score', 'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} suppress_plot : bool (default: False) Suppress matplotlib plots if True. Recommended for testing purposes. print_model : bool (default: True) Print model parameters in plot title if True. style : str (default: 'fivethirtyeight') Matplotlib style legend_loc : str (default: 'best') Where to place the plot legend: {'best', 'upper left', 'upper right', 'lower left', 'lower right'} Returns errors : (training_error, test_error): tuple of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/learning_curves/","title":"Plotting Learning Curves"},{"location":"user_guide/plotting/plot_learning_curves/#plotting-learning-curves","text":"A function to plot learning curves for classifiers.
Learning curves are extremely useful for analyzing whether a model is suffering from over- or underfitting (high variance or high bias). The function can be imported via from mlxtend.plotting import plot_learning_curves","title":"Plotting Learning Curves"},{"location":"user_guide/plotting/plot_learning_curves/#references","text":"-","title":"References"},{"location":"user_guide/plotting/plot_learning_curves/#example-1","text":"from mlxtend.plotting import plot_learning_curves import matplotlib.pyplot as plt from mlxtend.data import iris_data from mlxtend.preprocessing import shuffle_arrays_unison from sklearn.neighbors import KNeighborsClassifier import numpy as np # Loading some example data X, y = iris_data() X, y = shuffle_arrays_unison(arrays=[X, y], random_seed=123) X_train, X_test = X[:100], X[100:] y_train, y_test = y[:100], y[100:] clf = KNeighborsClassifier(n_neighbors=5) plot_learning_curves(X_train, y_train, X_test, y_test, clf) plt.show()","title":"Example 1"},{"location":"user_guide/plotting/plot_learning_curves/#api","text":"plot_learning_curves(X_train, y_train, X_test, y_test, clf, train_marker='o', test_marker='^', scoring='misclassification error', suppress_plot=False, print_model=True, style='fivethirtyeight', legend_loc='best') Plots learning curves of a classifier. Parameters X_train : array-like, shape = [n_samples, n_features] Feature matrix of the training dataset. y_train : array-like, shape = [n_samples] True class labels of the training dataset. X_test : array-like, shape = [n_samples, n_features] Feature matrix of the test dataset. y_test : array-like, shape = [n_samples] True class labels of the test dataset. clf : Classifier object. Must have .fit and .predict methods. train_marker : str (default: 'o') Marker for the training set line plot. test_marker : str (default: '^') Marker for the test set line plot. scoring : str (default: 'misclassification error') If not 'misclassification error', accepts the following metrics (from scikit-learn): {'accuracy', 'average_precision', 'f1_micro', 'f1_macro', 'f1_weighted', 'f1_samples', 'log_loss', 'precision', 'recall', 'roc_auc', 'adjusted_rand_score', 'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'r2'} suppress_plot : bool (default: False) Suppress matplotlib plots if True. Recommended for testing purposes. print_model : bool (default: True) Print model parameters in plot title if True. style : str (default: 'fivethirtyeight') Matplotlib style legend_loc : str (default: 'best') Where to place the plot legend: {'best', 'upper left', 'upper right', 'lower left', 'lower right'} Returns errors : (training_error, test_error): tuple of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/learning_curves/","title":"API"},{"location":"user_guide/plotting/plot_linear_regression/","text":"Linear Regression Plot A function to plot linear regression fits. from mlxtend.plotting import plot_linear_regression Overview The plot_linear_regression is a convenience function that uses scikit-learn's linear_model.LinearRegression to fit a linear model and SciPy's stats.pearsonr to calculate the correlation coefficient.
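The corr_func parameter described in the API section below makes the correlation measure pluggable. A minimal sketch of swapping in a Spearman rank correlation (the spearman helper here is illustrative and not part of mlxtend; it assumes corr_func is called as func(x, y) and that only the first element of the returned tuple is displayed):

import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from mlxtend.plotting import plot_linear_regression

def spearman(x, y):
    # scipy.stats.spearmanr returns (correlation, p-value);
    # only the first element is used for the plot annotation
    return stats.spearmanr(x, y)

X = np.array([4, 8, 13, 26, 31, 10, 8, 30, 18, 12])
y = np.array([14, 24, 22, 59, 66, 25, 18, 60, 39, 32])
intercept, slope, corr_coeff = plot_linear_regression(X, y, corr_func=spearman)
plt.show()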
References - Example 1 - Ordinary Least Squares Simple Linear Regression import matplotlib.pyplot as plt from mlxtend.plotting import plot_linear_regression import numpy as np X = np.array([4, 8, 13, 26, 31, 10, 8, 30, 18, 12, 20, 5, 28, 18, 6, 31, 12, 12, 27, 11, 6, 14, 25, 7, 13, 4, 15, 21, 15]) y = np.array([14, 24, 22, 59, 66, 25, 18, 60, 39, 32, 53, 18, 55, 41, 28, 61, 35, 36, 52, 23, 19, 25, 73, 16, 32, 14, 31, 43, 34]) intercept, slope, corr_coeff = plot_linear_regression(X, y) plt.show() API plot_linear_regression(X, y, model=LinearRegression(copy_X=True, fit_intercept=True, n_jobs=1, normalize=False), corr_func='pearsonr', scattercolor='blue', fit_style='k--', legend=True, xlim='auto') Plot a linear regression line fit. Parameters X : numpy array, shape = [n_samples,] Samples. y : numpy array, shape (n_samples,) Target values model: object (default: sklearn.linear_model.LinearRegression) Estimator object for regression. Must implement .fit() and .predict() methods. corr_func: str or function (default: 'pearsonr') Uses pearsonr from scipy.stats if corr_func='pearsonr' to compute the correlation coefficient. If not 'pearsonr', the corr_func parameter expects a function of the form func(x, y), which is expected to return a tuple (correlation_coefficient, some_unused_value). scattercolor: string (default: blue) Color of scatter plot points. fit_style: string (default: k--) Style for the line fit. legend: bool (default: True) Plots a legend with the correlation coefficient, slope, and intercept values. xlim: array-like (x_min, x_max) or 'auto' (default: 'auto') X-axis limits for the linear line fit. Returns regression_fit : tuple intercept, slope, corr_coeff (float, float, float) Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_linear_regression/","title":"Linear Regression Plot"},{"location":"user_guide/plotting/plot_linear_regression/#linear-regression-plot","text":"A function to plot linear regression fits. from mlxtend.plotting import plot_linear_regression","title":"Linear Regression Plot"},{"location":"user_guide/plotting/plot_linear_regression/#overview","text":"The plot_linear_regression is a convenience function that uses scikit-learn's linear_model.LinearRegression to fit a linear model and SciPy's stats.pearsonr to calculate the correlation coefficient.","title":"Overview"},{"location":"user_guide/plotting/plot_linear_regression/#references","text":"-","title":"References"},{"location":"user_guide/plotting/plot_linear_regression/#example-1-ordinary-least-squares-simple-linear-regression","text":"import matplotlib.pyplot as plt from mlxtend.plotting import plot_linear_regression import numpy as np X = np.array([4, 8, 13, 26, 31, 10, 8, 30, 18, 12, 20, 5, 28, 18, 6, 31, 12, 12, 27, 11, 6, 14, 25, 7, 13, 4, 15, 21, 15]) y = np.array([14, 24, 22, 59, 66, 25, 18, 60, 39, 32, 53, 18, 55, 41, 28, 61, 35, 36, 52, 23, 19, 25, 73, 16, 32, 14, 31, 43, 34]) intercept, slope, corr_coeff = plot_linear_regression(X, y) plt.show()","title":"Example 1 - Ordinary Least Squares Simple Linear Regression"},{"location":"user_guide/plotting/plot_linear_regression/#api","text":"plot_linear_regression(X, y, model=LinearRegression(copy_X=True, fit_intercept=True, n_jobs=1, normalize=False), corr_func='pearsonr', scattercolor='blue', fit_style='k--', legend=True, xlim='auto') Plot a linear regression line fit. Parameters X : numpy array, shape = [n_samples,] Samples.
y : numpy array, shape (n_samples,) Target values model: object (default: sklearn.linear_model.LinearRegression) Estimator object for regression. Must implement .fit() and .predict() methods. corr_func: str or function (default: 'pearsonr') Uses pearsonr from scipy.stats if corr_func='pearsonr' to compute the correlation coefficient. If not 'pearsonr', the corr_func parameter expects a function of the form func(x, y), which is expected to return a tuple (correlation_coefficient, some_unused_value). scattercolor: string (default: blue) Color of scatter plot points. fit_style: string (default: k--) Style for the line fit. legend: bool (default: True) Plots a legend with the correlation coefficient, slope, and intercept values. xlim: array-like (x_min, x_max) or 'auto' (default: 'auto') X-axis limits for the linear line fit. Returns regression_fit : tuple intercept, slope, corr_coeff (float, float, float) Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_linear_regression/","title":"API"},{"location":"user_guide/plotting/plot_sequential_feature_selection/","text":"Plot Sequential Feature Selection A matplotlib utility function for visualizing results from feature_selection.SequentialFeatureSelector . from mlxtend.plotting import plot_sequential_feature_selection Overview For more information on sequential feature selection, please see feature_selection.SequentialFeatureSelector . Example 1 - Plotting the results from SequentialFeatureSelector from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs from mlxtend.feature_selection import SequentialFeatureSelector as SFS import matplotlib.pyplot as plt from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=4) sfs = SFS(knn, k_features=4, forward=True, floating=False, scoring='accuracy', cv=5) sfs = sfs.fit(X, y) fig1 = plot_sfs(sfs.get_metric_dict(), kind='std_dev') plt.ylim([0.8, 1]) plt.title('Sequential Forward Selection (w. StdDev)') plt.grid() plt.show() Features: 4/4 API plot_sequential_feature_selection(metric_dict, kind='std_dev', color='blue', bcolor='steelblue', marker='o', alpha=0.2, ylabel='Performance', confidence_interval=0.95) Plot feature selection results. Parameters metric_dict : mlxtend.SequentialFeatureSelector.get_metric_dict() object kind : str (default: \"std_dev\") The kind of error bar or confidence interval in {'std_dev', 'std_err', 'ci', None}. color : str (default: \"blue\") Color of the lineplot (accepts any matplotlib color name) bcolor : str (default: \"steelblue\") Color of the error bars / confidence intervals (accepts any matplotlib color name). marker : str (default: \"o\") Marker of the line plot (accepts any matplotlib marker name). alpha : float in [0, 1] (default: 0.2) Transparency of the error bars / confidence intervals. ylabel : str (default: \"Performance\") Y-axis label. confidence_interval : float (default: 0.95) Confidence level if kind='ci' . Returns fig : matplotlib.pyplot.figure() object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_sequential_feature_selection/","title":"Plot Sequential Feature Selection"},{"location":"user_guide/plotting/plot_sequential_feature_selection/#plot-sequential-feature-selection","text":"A matplotlib utility function for visualizing results from feature_selection.SequentialFeatureSelector .
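Building on Example 1 above, the kind and confidence_interval parameters documented in the API can switch the error bars from standard deviations to confidence intervals. A minimal sketch, reusing the fitted sfs object and the plot_sfs import from Example 1 (the 90% level is illustrative):

# Plot 90% confidence intervals instead of standard deviations
fig2 = plot_sfs(sfs.get_metric_dict(confidence_interval=0.90),
                kind='ci',
                confidence_interval=0.90)
plt.ylim([0.8, 1])
plt.title('Sequential Forward Selection (w. 90% CI)')
plt.grid()
plt.show()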
from mlxtend.plotting import plot_sequential_feature_selection","title":"Plot Sequential Feature Selection"},{"location":"user_guide/plotting/plot_sequential_feature_selection/#overview","text":"For more information on sequential feature selection, please see feature_selection.SequentialFeatureSelector .","title":"Overview"},{"location":"user_guide/plotting/plot_sequential_feature_selection/#example-1-plotting-the-results-from-sequentialfeatureselector","text":"from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs from mlxtend.feature_selection import SequentialFeatureSelector as SFS import matplotlib.pyplot as plt from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target knn = KNeighborsClassifier(n_neighbors=4) sfs = SFS(knn, k_features=4, forward=True, floating=False, scoring='accuracy', cv=5) sfs = sfs.fit(X, y) fig1 = plot_sfs(sfs.get_metric_dict(), kind='std_dev') plt.ylim([0.8, 1]) plt.title('Sequential Forward Selection (w. StdDev)') plt.grid() plt.show() Features: 4/4","title":"Example 1 - Plotting the results from SequentialFeatureSelector"},{"location":"user_guide/plotting/plot_sequential_feature_selection/#api","text":"plot_sequential_feature_selection(metric_dict, kind='std_dev', color='blue', bcolor='steelblue', marker='o', alpha=0.2, ylabel='Performance', confidence_interval=0.95) Plot feature selection results. Parameters metric_dict : mlxtend.SequentialFeatureSelector.get_metric_dict() object kind : str (default: \"std_dev\") The kind of error bar or confidence interval in {'std_dev', 'std_err', 'ci', None}. color : str (default: \"blue\") Color of the lineplot (accepts any matplotlib color name) bcolor : str (default: \"steelblue\") Color of the error bars / confidence intervals (accepts any matplotlib color name). marker : str (default: \"o\") Marker of the line plot (accepts any matplotlib marker name). alpha : float in [0, 1] (default: 0.2) Transparency of the error bars / confidence intervals. ylabel : str (default: \"Performance\") Y-axis label. confidence_interval : float (default: 0.95) Confidence level if kind='ci' . Returns fig : matplotlib.pyplot.figure() object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/plot_sequential_feature_selection/","title":"API"},{"location":"user_guide/plotting/scatterplotmatrix/","text":"Scatter Plot Matrix A function for conveniently creating a scatter plot matrix in matplotlib. from mlxtend.plotting import scatterplotmatrix Overview A matplotlib convenience function for creating a scatterplot matrix.
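Since the **kwargs described in the API section below are forwarded to the underlying scatter plots, marker styling can be adjusted directly in the call. A minimal sketch under that assumption (s and marker are standard matplotlib scatter arguments; the values here are illustrative):

import matplotlib.pyplot as plt
from mlxtend.data import iris_data
from mlxtend.plotting import scatterplotmatrix

X, y = iris_data()
# 's' (marker size) and 'marker' are passed through to matplotlib's scatter
scatterplotmatrix(X, figsize=(10, 8), alpha=0.5, s=10, marker='x')
plt.tight_layout()
plt.show()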
References - Example 1 - Simple Scatter Plot Matrix import matplotlib.pyplot as plt from mlxtend.data import iris_data from mlxtend.plotting import scatterplotmatrix X, y = iris_data() scatterplotmatrix(X, figsize=(10, 8)) plt.tight_layout() plt.show() Example 2 - Scatter Plot Matrix with Multiple Categories names = ['sepal length [cm]', 'sepal width [cm]', 'petal length [cm]', 'petal width [cm]'] fig, axes = scatterplotmatrix(X[y==0], figsize=(10, 8), alpha=0.5) fig, axes = scatterplotmatrix(X[y==1], fig_axes=(fig, axes), alpha=0.5) fig, axes = scatterplotmatrix(X[y==2], fig_axes=(fig, axes), alpha=0.5, names=names) plt.tight_layout() plt.show() API scatterplotmatrix(X, fig_axes=None, names=None, figsize=(8, 8), alpha=1.0, **kwargs) Lower triangular of a scatterplot matrix Parameters X : array-like, shape={num_examples, num_features} Design matrix containing data instances (examples) with multiple exploratory variables (features). fig_axes : tuple (default: None) A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...) names : list (default: None) A list of string names, which should have the same number of elements as there are features (columns) in X . figsize : tuple (default: (8, 8)) Height and width of the subplot grid. Ignored if fig_axes is not None . alpha : float (default: 1.0) Transparency for both the scatter plots and the histograms along the diagonal. **kwargs : kwargs Keyword arguments for the scatterplots. Returns fig_axes : tuple A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...)","title":"Scatter Plot Matrix"},{"location":"user_guide/plotting/scatterplotmatrix/#scatter-plot-matrix","text":"A function for conveniently creating a scatter plot matrix in matplotlib. from mlxtend.plotting import scatterplotmatrix","title":"Scatter Plot Matrix"},{"location":"user_guide/plotting/scatterplotmatrix/#overview","text":"A matplotlib convenience function for creating a scatterplot matrix.","title":"Overview"},{"location":"user_guide/plotting/scatterplotmatrix/#references","text":"-","title":"References"},{"location":"user_guide/plotting/scatterplotmatrix/#example-1-simple-scatter-plot-matrix","text":"import matplotlib.pyplot as plt from mlxtend.data import iris_data from mlxtend.plotting import scatterplotmatrix X, y = iris_data() scatterplotmatrix(X, figsize=(10, 8)) plt.tight_layout() plt.show()","title":"Example 1 - Simple Scatter Plot Matrix"},{"location":"user_guide/plotting/scatterplotmatrix/#example-2-scatter-plot-matrix-with-multiple-categories","text":"names = ['sepal length [cm]', 'sepal width [cm]', 'petal length [cm]', 'petal width [cm]'] fig, axes = scatterplotmatrix(X[y==0], figsize=(10, 8), alpha=0.5) fig, axes = scatterplotmatrix(X[y==1], fig_axes=(fig, axes), alpha=0.5) fig, axes = scatterplotmatrix(X[y==2], fig_axes=(fig, axes), alpha=0.5, names=names) plt.tight_layout() plt.show()","title":"Example 2 - Scatter Plot Matrix with Multiple Categories"},{"location":"user_guide/plotting/scatterplotmatrix/#api","text":"scatterplotmatrix(X, fig_axes=None, names=None, figsize=(8, 8), alpha=1.0, **kwargs) Lower triangular of a scatterplot matrix Parameters X : array-like, shape={num_examples, num_features} Design matrix containing data instances (examples) with multiple exploratory variables (features).
fig_axes : tuple (default: None) A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...) names : list (default: None) A list of string names, which should have the same number of elements as there are features (columns) in X . figsize : tuple (default: (8, 8)) Height and width of the subplot grid. Ignored if fig_axes is not None . alpha : float (default: 1.0) Transparency for both the scatter plots and the histograms along the diagonal. **kwargs : kwargs Keyword arguments for the scatterplots. Returns fig_axes : tuple A (fig, axes) tuple, where fig is a figure object and axes is an axes object created via matplotlib, for example, by calling the pyplot subplot function fig, axes = plt.subplots(...)","title":"API"},{"location":"user_guide/plotting/stacked_barplot/","text":"Stacked Barplot A function to conveniently plot stacked bar plots in matplotlib using pandas DataFrame s. from mlxtend.plotting import stacked_barplot Overview A matplotlib convenience function for creating barplots from DataFrames where each sample is associated with several categories. References - Example 1 - Stacked Barplot from Pandas DataFrames import pandas as pd s1 = [1.0, 2.0, 3.0, 4.0] s2 = [1.4, 2.1, 2.9, 5.1] s3 = [1.9, 2.2, 3.5, 4.1] s4 = [1.4, 2.5, 3.5, 4.2] data = [s1, s2, s3, s4] df = pd.DataFrame(data, columns=['X1', 'X2', 'X3', 'X4']) df.columns = ['X1', 'X2', 'X3', 'X4'] df.index = ['Sample1', 'Sample2', 'Sample3', 'Sample4'] df X1 X2 X3 X4 Sample1 1.0 2.0 3.0 4.0 Sample2 1.4 2.1 2.9 5.1 Sample3 1.9 2.2 3.5 4.1 Sample4 1.4 2.5 3.5 4.2 By default, the index of the DataFrame is used as the x-axis labels, and the DataFrame columns are used for the plot legend. import matplotlib.pyplot as plt from mlxtend.plotting import stacked_barplot fig = stacked_barplot(df, rotation=45, legend_loc='best') API stacked_barplot(df, bar_width='auto', colors='bgrcky', labels='index', rotation=90, legend_loc='best') Function to plot stacked barplots Parameters df : pandas.DataFrame A pandas DataFrame where the index denotes the x-axis labels, and the columns contain the different measurements for each row. bar_width: 'auto' or float (default: 'auto') Parameter to set the widths of the bars. If 'auto', the width is automatically determined by the number of columns in the dataset. colors: str (default: 'bgrcky') The colors of the bars. labels: 'index' or iterable (default: 'index') If 'index', the DataFrame index will be used as x-tick labels. rotation: int (default: 90) Parameter to rotate the x-axis labels. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/stacked_barplot/","title":"Stacked Barplot"},{"location":"user_guide/plotting/stacked_barplot/#stacked-barplot","text":"A function to conveniently plot stacked bar plots in matplotlib using pandas DataFrame s.
from mlxtend.plotting import stacked_barplot\",\"title\":\"Stacked Barplot\"},{\"location\":\"user_guide/plotting/stacked_barplot/#overview\",\"text\":\"A matplotlib convenience function for creating barplots from DataFrames where each sample is associated with several categories.\",\"title\":\"Overview\"},{\"location\":\"user_guide/plotting/stacked_barplot/#references\",\"text\":\"-\",\"title\":\"References\"},{\"location\":\"user_guide/plotting/stacked_barplot/#example-1-stacked-barplot-from-pandas-dataframes\",\"text\":\"import pandas as pd s1 = [1.0, 2.0, 3.0, 4.0] s2 = [1.4, 2.1, 2.9, 5.1] s3 = [1.9, 2.2, 3.5, 4.1] s4 = [1.4, 2.5, 3.5, 4.2] data = [s1, s2, s3, s4] df = pd.DataFrame(data, columns=['X1', 'X2', 'X3', 'X4']) df.columns = ['X1', 'X2', 'X3', 'X4'] df.index = ['Sample1', 'Sample2', 'Sample3', 'Sample4'] df X1 X2 X3 X4 Sample1 1.0 2.0 3.0 4.0 Sample2 1.4 2.1 2.9 5.1 Sample3 1.9 2.2 3.5 4.1 Sample4 1.4 2.5 3.5 4.2 By default, the index of the DataFrame is used as the x-axis tick labels, and the DataFrame columns are used for the plot legend. import matplotlib.pyplot as plt from mlxtend.plotting import stacked_barplot fig = stacked_barplot(df, rotation=45, legend_loc='best')\",\"title\":\"Example 1 - Stacked Barplot from Pandas DataFrames\"},{\"location\":\"user_guide/plotting/stacked_barplot/#api\",\"text\":\"stacked_barplot(df, bar_width='auto', colors='bgrcky', labels='index', rotation=90, legend_loc='best') Function to plot stacked barplots Parameters df : pandas.DataFrame A pandas DataFrame where the index denotes the x-axis labels, and the columns contain the different measurements for each row. bar_width: 'auto' or float (default: 'auto') Parameter to set the widths of the bars. If 'auto', the width is automatically determined by the number of columns in the dataset. colors: str (default: 'bgrcky') The colors of the bars. labels: 'index' or iterable (default: 'index') If 'index', the DataFrame index will be used as x-tick labels. rotation: int (default: 90) Parameter to rotate the x-axis labels. legend_loc : str (default: 'best') Location of the plot legend {best, upper left, upper right, lower left, lower right} No legend if legend_loc=False Returns fig : matplotlib.pyplot figure object Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/plotting/stacked_barplot/\",\"title\":\"API\"},{\"location\":\"user_guide/preprocessing/CopyTransformer/\",\"text\":\"CopyTransformer A simple transformer that returns a copy of the input array, for example, as part of a scikit-learn pipeline.
from mlxtend.preprocessing import CopyTransformer Example 1 from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier from sklearn.feature_extraction.text import CountVectorizer from mlxtend.preprocessing import CopyTransformer import re import numpy as np X_train = np.array(['abc def ghi', 'this is a test', 'this is a test', 'this is a test']) y_train = np.array([0, 0, 1, 1]) pipe_1 = Pipeline([ ('vect', CountVectorizer()), ('to_dense', CopyTransformer()), ('clf', RandomForestClassifier()) ]) parameters_1 = dict( clf__n_estimators=[50, 100, 200], clf__max_features=['sqrt', 'log2', None],) grid_search_1 = GridSearchCV(pipe_1, parameters_1, n_jobs=1, verbose=1, scoring='accuracy', cv=2) print(\"Performing grid search...\") print(\"pipeline:\", [name for name, _ in pipe_1.steps]) print(\"parameters:\") grid_search_1.fit(X_train, y_train) print(\"Best score: %0.3f\" % grid_search_1.best_score_) print(\"Best parameters set:\") best_parameters_1 = grid_search_1.best_estimator_.get_params() for param_name in sorted(parameters_1.keys()): print(\"\\t%s: %r\" % (param_name, best_parameters_1[param_name])) Performing grid search... pipeline: ['vect', 'to_dense', 'clf'] parameters: Fitting 2 folds for each of 9 candidates, totalling 18 fits Best score: 0.500 Best parameters set: clf__max_features: 'sqrt' clf__n_estimators: 50 [Parallel(n_jobs=1)]: Done 18 out of 18 | elapsed: 2.9s finished API CopyTransformer() Transformer that returns a copy of the input array For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/CopyTransformer/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array.\",\"title\":\"CopyTransformer\"},{\"location\":\"user_guide/preprocessing/CopyTransformer/#copytransformer\",\"text\":\"A simple transformer that returns a copy of the input array, for example, as part of a scikit-learn pipeline.
from mlxtend.preprocessing import CopyTransformer","title":"CopyTransformer"},{"location":"user_guide/preprocessing/CopyTransformer/#example-1","text":"from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier from sklearn.feature_extraction.text import CountVectorizer from mlxtend.preprocessing import CopyTransformer import re import numpy as np X_train = np.array(['abc def ghi', 'this is a test', 'this is a test', 'this is a test']) y_train = np.array([0, 0, 1, 1]) pipe_1 = Pipeline([ ('vect', CountVectorizer()), ('to_dense', CopyTransformer()), ('clf', RandomForestClassifier()) ]) parameters_1 = dict( clf__n_estimators=[50, 100, 200], clf__max_features=['sqrt', 'log2', None],) grid_search_1 = GridSearchCV(pipe_1, parameters_1, n_jobs=1, verbose=1, scoring='accuracy', cv=2) print(\"Performing grid search...\") print(\"pipeline:\", [name for name, _ in pipe_1.steps]) print(\"parameters:\") grid_search_1.fit(X_train, y_train) print(\"Best score: %0.3f\" % grid_search_1.best_score_) print(\"Best parameters set:\") best_parameters_1 = grid_search_1.best_estimator_.get_params() for param_name in sorted(parameters_1.keys()): print(\"\\t%s: %r\" % (param_name, best_parameters_1[param_name])) Performing grid search... pipeline: ['vect', 'to_dense', 'clf'] parameters: Fitting 2 folds for each of 9 candidates, totalling 18 fits Best score: 0.500 Best parameters set: clf__max_features: 'sqrt' clf__n_estimators: 50 [Parallel(n_jobs=1)]: Done 18 out of 18 | elapsed: 2.9s finished","title":"Example 1"},{"location":"user_guide/preprocessing/CopyTransformer/#api","text":"CopyTransformer() Transformer that returns a copy of the input array For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/CopyTransformer/","title":"API"},{"location":"user_guide/preprocessing/CopyTransformer/#methods","text":"fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params( params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form __ so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a copy of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. 
y : array-like, shape = [n_samples] (default: None) Returns X_copy : copy of the input X array.\",\"title\":\"Methods\"},{\"location\":\"user_guide/preprocessing/DenseTransformer/\",\"text\":\"DenseTransformer A simple transformer that converts a sparse array into a dense numpy array, e.g., as required for scikit-learn's Pipeline when a CountVectorizer is used in combination with estimators that are not compatible with sparse matrices. from mlxtend.preprocessing import DenseTransformer Example 1 from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier from sklearn.feature_extraction.text import CountVectorizer from mlxtend.preprocessing import DenseTransformer import re import numpy as np X_train = np.array(['abc def ghi', 'this is a test', 'this is a test', 'this is a test']) y_train = np.array([0, 0, 1, 1]) pipe_1 = Pipeline([ ('vect', CountVectorizer()), ('to_dense', DenseTransformer()), ('clf', RandomForestClassifier()) ]) parameters_1 = dict( clf__n_estimators=[50, 100, 200], clf__max_features=['sqrt', 'log2', None],) grid_search_1 = GridSearchCV(pipe_1, parameters_1, n_jobs=1, verbose=1, scoring='accuracy', cv=2) print(\"Performing grid search...\") print(\"pipeline:\", [name for name, _ in pipe_1.steps]) print(\"parameters:\") grid_search_1.fit(X_train, y_train) print(\"Best score: %0.3f\" % grid_search_1.best_score_) print(\"Best parameters set:\") best_parameters_1 = grid_search_1.best_estimator_.get_params() for param_name in sorted(parameters_1.keys()): print(\"\\t%s: %r\" % (param_name, best_parameters_1[param_name])) Performing grid search... pipeline: ['vect', 'to_dense', 'clf'] parameters: Fitting 2 folds for each of 9 candidates, totalling 18 fits Best score: 0.500 Best parameters set: clf__max_features: 'sqrt' clf__n_estimators: 50 [Parallel(n_jobs=1)]: Done 18 out of 18 | elapsed: 3.9s finished API DenseTransformer(return_copy=True) Convert a sparse array into a dense array. For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/DenseTransformer/ Methods fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features.
y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array.\",\"title\":\"DenseTransformer\"},{\"location\":\"user_guide/preprocessing/DenseTransformer/#densetransformer\",\"text\":\"A simple transformer that converts a sparse array into a dense numpy array, e.g., as required for scikit-learn's Pipeline when a CountVectorizer is used in combination with estimators that are not compatible with sparse matrices. from mlxtend.preprocessing import DenseTransformer\",\"title\":\"DenseTransformer\"},{\"location\":\"user_guide/preprocessing/DenseTransformer/#example-1\",\"text\":\"from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier from sklearn.feature_extraction.text import CountVectorizer from mlxtend.preprocessing import DenseTransformer import re import numpy as np X_train = np.array(['abc def ghi', 'this is a test', 'this is a test', 'this is a test']) y_train = np.array([0, 0, 1, 1]) pipe_1 = Pipeline([ ('vect', CountVectorizer()), ('to_dense', DenseTransformer()), ('clf', RandomForestClassifier()) ]) parameters_1 = dict( clf__n_estimators=[50, 100, 200], clf__max_features=['sqrt', 'log2', None],) grid_search_1 = GridSearchCV(pipe_1, parameters_1, n_jobs=1, verbose=1, scoring='accuracy', cv=2) print(\"Performing grid search...\") print(\"pipeline:\", [name for name, _ in pipe_1.steps]) print(\"parameters:\") grid_search_1.fit(X_train, y_train) print(\"Best score: %0.3f\" % grid_search_1.best_score_) print(\"Best parameters set:\") best_parameters_1 = grid_search_1.best_estimator_.get_params() for param_name in sorted(parameters_1.keys()): print(\"\\t%s: %r\" % (param_name, best_parameters_1[param_name])) Performing grid search... pipeline: ['vect', 'to_dense', 'clf'] parameters: Fitting 2 folds for each of 9 candidates, totalling 18 fits Best score: 0.500 Best parameters set: clf__max_features: 'sqrt' clf__n_estimators: 50 [Parallel(n_jobs=1)]: Done 18 out of 18 | elapsed: 3.9s finished\",\"title\":\"Example 1\"},{\"location\":\"user_guide/preprocessing/DenseTransformer/#api\",\"text\":\"DenseTransformer(return_copy=True) Convert a sparse array into a dense array. For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/DenseTransformer/\",\"title\":\"API\"},{\"location\":\"user_guide/preprocessing/DenseTransformer/#methods\",\"text\":\"fit(X, y=None) Mock method. Does nothing. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns self fit_transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines).
The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, y=None) Return a dense version of the input array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] (default: None) Returns X_dense : dense version of the input X array.\",\"title\":\"Methods\"},{\"location\":\"user_guide/preprocessing/MeanCenterer/\",\"text\":\"Mean Centerer A transformer object that performs column-based mean centering on a NumPy array. from mlxtend.preprocessing import MeanCenterer Example 1 - Centering a NumPy Array Use the fit method to compute and store the column means of a dataset (e.g., the training dataset) in a new MeanCenterer object. Then, call the transform method on the same dataset to center it at the sample mean. import numpy as np from mlxtend.preprocessing import MeanCenterer X_train = np.array( [[1, 2, 3], [4, 5, 6], [7, 8, 9]]) mc = MeanCenterer().fit(X_train) mc.transform(X_train) array([[-3., -3., -3.], [ 0., 0., 0.], [ 3., 3., 3.]]) API MeanCenterer() Column centering of vectors and matrices. Attributes col_means : numpy.ndarray [n_columns] NumPy array storing the mean values for centering after fitting the MeanCenterer object. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/MeanCenterer/ Methods fit(X) Gets the column means for mean centering. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns self fit_transform(X) Fits and transforms an array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered. transform(X) Centers a NumPy array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered.\",\"title\":\"Mean Centerer\"},{\"location\":\"user_guide/preprocessing/MeanCenterer/#mean-centerer\",\"text\":\"A transformer object that performs column-based mean centering on a NumPy array. from mlxtend.preprocessing import MeanCenterer\",\"title\":\"Mean Centerer\"},{\"location\":\"user_guide/preprocessing/MeanCenterer/#example-1-centering-a-numpy-array\",\"text\":\"Use the fit method to compute and store the column means of a dataset (e.g., the training dataset) in a new MeanCenterer object. Then, call the transform method on the same dataset to center it at the sample mean. import numpy as np from mlxtend.preprocessing import MeanCenterer X_train = np.array( [[1, 2, 3], [4, 5, 6], [7, 8, 9]]) mc = MeanCenterer().fit(X_train) mc.transform(X_train) array([[-3., -3., -3.], [ 0., 0., 0.], [ 3., 3., 3.]])\",\"title\":\"Example 1 - Centering a NumPy Array\"},{\"location\":\"user_guide/preprocessing/MeanCenterer/#api\",\"text\":\"MeanCenterer() Column centering of vectors and matrices. Attributes col_means : numpy.ndarray [n_columns] NumPy array storing the mean values for centering after fitting the MeanCenterer object.
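As a quick illustration of the col_means attribute (a minimal sketch based on the fitting example above; exact output formatting may vary across NumPy versions):
import numpy as np
from mlxtend.preprocessing import MeanCenterer
X_train = np.array([[1, 2, 3],
                    [4, 5, 6],
                    [7, 8, 9]])
mc = MeanCenterer().fit(X_train)
print(mc.col_means)  # [4. 5. 6.] -- the column-wise means learned during fit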
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/MeanCenterer/\",\"title\":\"API\"},{\"location\":\"user_guide/preprocessing/MeanCenterer/#methods\",\"text\":\"fit(X) Gets the column means for mean centering. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns self fit_transform(X) Fits and transforms an array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered. transform(X) Centers a NumPy array. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Array of data vectors, where n_samples is the number of samples and n_features is the number of features. Returns X_tr : {array-like, sparse matrix}, shape = [n_samples, n_features] A copy of the input array with the columns centered.\",\"title\":\"Methods\"},{\"location\":\"user_guide/preprocessing/TransactionEncoder/\",\"text\":\"TransactionEncoder Encoder class for transaction data in Python lists from mlxtend.preprocessing import TransactionEncoder Overview Encodes database transaction data in the form of a Python list of lists into a NumPy array. Example 1 Suppose we have the following transaction data: from mlxtend.preprocessing import TransactionEncoder dataset = [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] Using a TransactionEncoder object, we can transform this dataset into an array format suitable for typical machine learning APIs. Via the fit method, the TransactionEncoder learns the unique labels in the dataset, and via the transform method, it transforms the input dataset (a Python list of lists) into a one-hot encoded NumPy boolean array: te = TransactionEncoder() te_ary = te.fit(dataset).transform(dataset) te_ary array([[ True, False, True, True, False, True], [ True, False, True, False, False, True], [ True, False, True, False, False, False], [ True, True, False, False, False, False], [False, False, True, True, True, True], [False, False, True, False, True, True], [False, False, True, False, True, False], [ True, True, False, False, False, False]], dtype=bool) The NumPy array is boolean for the sake of memory efficiency when working with large datasets.
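To make the memory argument concrete, we can compare the footprint of the boolean array against an integer copy via NumPy's nbytes attribute (a minimal sketch using the first two transactions from above; the int64 dtype is an assumption, as the default integer size is platform-dependent):
import numpy as np
te_ary = np.array([[True, False, True, True, False, True],
                   [True, False, True, False, False, True]])
print(te_ary.nbytes)                  # 12 -- one byte per boolean entry
print(te_ary.astype('int64').nbytes)  # 96 -- eight bytes per 64-bit integer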
If a classic integer representation is desired instead, we can just convert the array to the appropriate type: te_ary.astype(\"int\") array([[1, 0, 1, 1, 0, 1], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1], [0, 0, 1, 0, 1, 1], [0, 0, 1, 0, 1, 0], [1, 1, 0, 0, 0, 0]]) After fitting, the unique column names that correspond to the data array shown above can be accessed via the columns_ attribute: te.columns_ ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] For our convenience, we can turn the encoded array into a pandas DataFrame : import pandas as pd pd.DataFrame(te_ary, columns=te.columns_) Apple Bananas Beer Chicken Milk Rice 0 True False True True False True 1 True False True False False True 2 True False True False False False 3 True True False False False False 4 False False True True True True 5 False False True False True True 6 False False True False True False 7 True True False False False False If we desire, we can turn the one-hot encoded array back into a transaction list of lists via the inverse_transform function: first4 = te_ary[:4] te.inverse_transform(first4) [['Apple', 'Beer', 'Chicken', 'Rice'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas']] API TransactionEncoder() Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/ Methods fit(X) Learn unique column names from a transaction dataset Parameters X : list of lists A Python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions. Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A Python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction.
For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A Python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return a Compressed Sparse Row matrix instead of a regular NumPy array. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. Exact representation depends on the sparse argument For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice']\",\"title\":\"TransactionEncoder\"},{\"location\":\"user_guide/preprocessing/TransactionEncoder/#transactionencoder\",\"text\":\"Encoder class for transaction data in Python lists from mlxtend.preprocessing import TransactionEncoder\",\"title\":\"TransactionEncoder\"},{\"location\":\"user_guide/preprocessing/TransactionEncoder/#overview\",\"text\":\"Encodes database transaction data in the form of a Python list of lists into a NumPy array.\",\"title\":\"Overview\"},{\"location\":\"user_guide/preprocessing/TransactionEncoder/#example-1\",\"text\":\"Suppose we have the following transaction data: from mlxtend.preprocessing import TransactionEncoder dataset = [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] Using a TransactionEncoder object, we can transform this dataset into an array format suitable for typical machine learning APIs.
Via the fit method, the TransactionEncoder learns the unique labels in the dataset, and via the transform method, it transforms the input dataset (a Python list of lists) into a one-hot encoded NumPy boolean array: te = TransactionEncoder() te_ary = te.fit(dataset).transform(dataset) te_ary array([[ True, False, True, True, False, True], [ True, False, True, False, False, True], [ True, False, True, False, False, False], [ True, True, False, False, False, False], [False, False, True, True, True, True], [False, False, True, False, True, True], [False, False, True, False, True, False], [ True, True, False, False, False, False]], dtype=bool) The NumPy array is boolean for the sake of memory efficiency when working with large datasets. If a classic integer representation is desired instead, we can just convert the array to the appropriate type: te_ary.astype(\"int\") array([[1, 0, 1, 1, 0, 1], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1], [0, 0, 1, 0, 1, 1], [0, 0, 1, 0, 1, 0], [1, 1, 0, 0, 0, 0]]) After fitting, the unique column names that correspond to the data array shown above can be accessed via the columns_ attribute: te.columns_ ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] For our convenience, we can turn the encoded array into a pandas DataFrame : import pandas as pd pd.DataFrame(te_ary, columns=te.columns_) Apple Bananas Beer Chicken Milk Rice 0 True False True True False True 1 True False True False False True 2 True False True False False False 3 True True False False False False 4 False False True True True True 5 False False True False True True 6 False False True False True False 7 True True False False False False If we desire, we can turn the one-hot encoded array back into a transaction list of lists via the inverse_transform function: first4 = te_ary[:4] te.inverse_transform(first4) [['Apple', 'Beer', 'Chicken', 'Rice'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas']]\",\"title\":\"Example 1\"},{\"location\":\"user_guide/preprocessing/TransactionEncoder/#api\",\"text\":\"TransactionEncoder() Encoder class for transaction data in Python lists Parameters None Attributes columns_: list List of unique names in the X input list of lists Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/\",\"title\":\"API\"},{\"location\":\"user_guide/preprocessing/TransactionEncoder/#methods\",\"text\":\"fit(X) Learn unique column names from a transaction dataset Parameters X : list of lists A Python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] fit_transform(X, sparse=False) Fit a TransactionEncoder encoder and transform a dataset. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. inverse_transform(array) Transforms an encoded NumPy array back into transactions.
Parameters array : NumPy array [n_transactions, n_unique_items] The NumPy one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice'] Returns X : list of lists A Python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self transform(X, sparse=False) Transform transactions into a one-hot encoded NumPy array. Parameters X : list of lists A Python list of lists, where the outer list stores the n transactions and the inner list stores the items in each transaction. For example, [['Apple', 'Beer', 'Rice', 'Chicken'], ['Apple', 'Beer', 'Rice'], ['Apple', 'Beer'], ['Apple', 'Bananas'], ['Milk', 'Beer', 'Rice', 'Chicken'], ['Milk', 'Beer', 'Rice'], ['Milk', 'Beer'], ['Apple', 'Bananas']] sparse: bool (default=False) If True, transform will return a Compressed Sparse Row matrix instead of a regular NumPy array. Returns array : NumPy array [n_transactions, n_unique_items] if sparse=False (default). Compressed Sparse Row matrix otherwise The one-hot encoded boolean array of the input transactions, where the columns represent the unique items found in the input array in alphabetic order. Exact representation depends on the sparse argument For example, array([[True , False, True , True , False, True ], [True , False, True , False, False, True ], [True , False, True , False, False, False], [True , True , False, False, False, False], [False, False, True , True , True , True ], [False, False, True , False, True , True ], [False, False, True , False, True , False], [True , True , False, False, False, False]]) The corresponding column labels are available as self.columns_, e.g., ['Apple', 'Bananas', 'Beer', 'Chicken', 'Milk', 'Rice']\",\"title\":\"Methods\"},{\"location\":\"user_guide/preprocessing/minmax_scaling/\",\"text\":\"MinMax Scaling A function for min-max scaling of pandas DataFrames or NumPy arrays. from mlxtend.preprocessing import minmax_scaling An alternative approach to Z-score normalization (or standardization) is the so-called Min-Max scaling (often also simply called \"normalization\" - a common cause for ambiguities). In this approach, the data is scaled to a fixed range - usually 0 to 1. The cost of having this bounded range - in contrast to standardization - is that we will end up with smaller standard deviations, which can suppress the effect of outliers. A Min-Max scaling is typically done via the following equation: X_{sc} = \frac{X - X_{min}}{X_{max} - X_{min}}.
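This equation is easy to reproduce column-wise with plain NumPy (a minimal sketch of the computation that minmax_scaling performs, not its actual implementation; the toy array is made up):
import numpy as np
X = np.array([[1., 10.],
              [2., 9.],
              [3., 8.]])
# X_sc = (X - X_min) / (X_max - X_min), applied to each column
X_sc = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
print(X_sc)  # [[0. 1.] [0.5 0.5] [1. 0.]]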
One family of algorithms that is scale-invariant encompasses tree-based learning algorithms. Let's take the general CART decision tree algorithm. Without going into much depth regarding information gain and impurity measures, we can think of the decision as \"is feature x_i >= some_val?\" Intuitively, we can see that it really doesn't matter on which scale this feature is (centimeters, Fahrenheit, a standardized scale -- it really doesn't matter). Some examples of algorithms where feature scaling matters are: k-nearest neighbors with a Euclidean distance measure if you want all features to contribute equally k-means (see k-nearest neighbors) logistic regression, SVMs, perceptrons, neural networks etc. if you are using gradient descent/ascent-based optimization, otherwise some weights will update much faster than others linear discriminant analysis, principal component analysis, kernel principal component analysis since you want to find directions that maximize the variance (under the constraints that those directions/eigenvectors/principal components are orthogonal); you want to have features on the same scale since you'd emphasize variables on \"larger measurement scales\" more. There are many more cases than I can possibly list here ... I always recommend thinking about the algorithm and what it's doing, and then it typically becomes obvious whether we want to scale our features or not. In addition, we'd also want to think about whether we want to \"standardize\" or \"normalize\" (here: scaling to [0, 1] range) our data. Some algorithms assume that our data is centered at 0. For example, if we initialize the weights of a small multi-layer perceptron with tanh activation units to 0 or small random values centered around zero, we want to update the model weights \"equally.\" As a rule of thumb I'd say: When in doubt, just standardize the data, it shouldn't hurt. Example 1 - Scaling a Pandas DataFrame import pandas as pd s1 = pd.Series([1, 2, 3, 4, 5, 6], index=(range(6))) s2 = pd.Series([10, 9, 8, 7, 6, 5], index=(range(6))) df = pd.DataFrame(s1, columns=['s1']) df['s2'] = s2 df s1 s2 0 1 10 1 2 9 2 3 8 3 4 7 4 5 6 5 6 5 from mlxtend.preprocessing import minmax_scaling minmax_scaling(df, columns=['s1', 's2']) s1 s2 0 0.0 1.0 1 0.2 0.8 2 0.4 0.6 3 0.6 0.4 4 0.8 0.2 5 1.0 0.0 Example 2 - Scaling a NumPy Array import numpy as np X = np.array([[1, 10], [2, 9], [3, 8], [4, 7], [5, 6], [6, 5]]) X array([[ 1, 10], [ 2, 9], [ 3, 8], [ 4, 7], [ 5, 6], [ 6, 5]]) from mlxtend.preprocessing import minmax_scaling minmax_scaling(X, columns=[0, 1]) array([[ 0. , 1. ], [ 0.2, 0.8], [ 0.4, 0.6], [ 0.6, 0.4], [ 0.8, 0.2], [ 1. , 0. ]]) API minmax_scaling(array, columns, min_val=0, max_val=1) Min-max scaling of pandas DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] min_val : int or float , optional (default= 0 ) minimum value after rescaling. max_val : int or float , optional (default= 1 ) maximum value after rescaling. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with rescaled columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/minmax_scaling/\",\"title\":\"MinMax Scaling\"},{\"location\":\"user_guide/preprocessing/minmax_scaling/#minmax-scaling\",\"text\":\"A function for min-max scaling of pandas DataFrames or NumPy arrays.
from mlxtend.preprocessing import minmax_scaling An alternative approach to Z-score normalization (or standardization) is the so-called Min-Max scaling (often also simply called \"normalization\" - a common cause for ambiguities). In this approach, the data is scaled to a fixed range - usually 0 to 1. The cost of having this bounded range - in contrast to standardization - is that we will end up with smaller standard deviations, which can suppress the effect of outliers. A Min-Max scaling is typically done via the following equation: X_{sc} = \frac{X - X_{min}}{X_{max} - X_{min}}. One family of algorithms that is scale-invariant encompasses tree-based learning algorithms. Let's take the general CART decision tree algorithm. Without going into much depth regarding information gain and impurity measures, we can think of the decision as \"is feature x_i >= some_val?\" Intuitively, we can see that it really doesn't matter on which scale this feature is (centimeters, Fahrenheit, a standardized scale -- it really doesn't matter). Some examples of algorithms where feature scaling matters are: k-nearest neighbors with a Euclidean distance measure if you want all features to contribute equally k-means (see k-nearest neighbors) logistic regression, SVMs, perceptrons, neural networks etc. if you are using gradient descent/ascent-based optimization, otherwise some weights will update much faster than others linear discriminant analysis, principal component analysis, kernel principal component analysis since you want to find directions that maximize the variance (under the constraints that those directions/eigenvectors/principal components are orthogonal); you want to have features on the same scale since you'd emphasize variables on \"larger measurement scales\" more. There are many more cases than I can possibly list here ... I always recommend thinking about the algorithm and what it's doing, and then it typically becomes obvious whether we want to scale our features or not. In addition, we'd also want to think about whether we want to \"standardize\" or \"normalize\" (here: scaling to [0, 1] range) our data. Some algorithms assume that our data is centered at 0. For example, if we initialize the weights of a small multi-layer perceptron with tanh activation units to 0 or small random values centered around zero, we want to update the model weights \"equally.\" As a rule of thumb I'd say: When in doubt, just standardize the data, it shouldn't hurt.\",\"title\":\"MinMax Scaling\"},{\"location\":\"user_guide/preprocessing/minmax_scaling/#example-1-scaling-a-pandas-dataframe\",\"text\":\"import pandas as pd s1 = pd.Series([1, 2, 3, 4, 5, 6], index=(range(6))) s2 = pd.Series([10, 9, 8, 7, 6, 5], index=(range(6))) df = pd.DataFrame(s1, columns=['s1']) df['s2'] = s2 df s1 s2 0 1 10 1 2 9 2 3 8 3 4 7 4 5 6 5 6 5 from mlxtend.preprocessing import minmax_scaling minmax_scaling(df, columns=['s1', 's2']) s1 s2 0 0.0 1.0 1 0.2 0.8 2 0.4 0.6 3 0.6 0.4 4 0.8 0.2 5 1.0 0.0\",\"title\":\"Example 1 - Scaling a Pandas DataFrame\"},{\"location\":\"user_guide/preprocessing/minmax_scaling/#example-2-scaling-a-numpy-array\",\"text\":\"import numpy as np X = np.array([[1, 10], [2, 9], [3, 8], [4, 7], [5, 6], [6, 5]]) X array([[ 1, 10], [ 2, 9], [ 3, 8], [ 4, 7], [ 5, 6], [ 6, 5]]) from mlxtend.preprocessing import minmax_scaling minmax_scaling(X, columns=[0, 1]) array([[ 0. , 1. ], [ 0.2, 0.8], [ 0.4, 0.6], [ 0.6, 0.4], [ 0.8, 0.2], [ 1. , 0.
]])","title":"Example 2 - Scaling a NumPy Array"},{"location":"user_guide/preprocessing/minmax_scaling/#api","text":"minmax_scaling(array, columns, min_val=0, max_val=1) Min max scaling of pandas' DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] min_val : int or float , optional (default= 0 ) minimum value after rescaling. max_val : int or float , optional (default= 1 ) maximum value after rescaling. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with rescaled columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/minmax_scaling/","title":"API"},{"location":"user_guide/preprocessing/one-hot_encoding/","text":"One-Hot Encoding A function that performs one-hot encoding for class labels. from mlxtend.preprocessing import one_hot Overview Typical supervised machine learning algorithms for classifications assume that the class labels are nominal (a special case of categorical where no order is implied). A typical example of an nominal feature would be \"color\" since we can't say (in most applications) that \"orange > blue > red\". The one_hot function provides a simple interface to convert class label integers into a so-called one-hot array, where each unique label is represented as a column in the new array. For example, let's assume we have 5 data points from 3 different classes: 0, 1, and 2. y = [0, # sample 1, class 0 1, # sample 2, class 1 0, # sample 3, class 0 2, # sample 4, class 2 2] # sample 5, class 2 After one-hot encoding, we then obtain the following array (note that the index position of the \"1\" in each row denotes the class label of this sample): y = [[1, 0, 0], # sample 1, class 0 [0, 1, 0], # sample 2, class 1 [1, 0, 0], # sample 3, class 0 [0, 0, 1], # sample 4, class 2 [0, 0, 1] # sample 5, class 2 ]) Example 1 - Defaults from mlxtend.preprocessing import one_hot import numpy as np y = np.array([0, 1, 2, 1, 2]) one_hot(y) array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.], [ 0., 1., 0.], [ 0., 0., 1.]]) Example 2 - Python Lists from mlxtend.preprocessing import one_hot y = [0, 1, 2, 1, 2] one_hot(y) array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.], [ 0., 1., 0.], [ 0., 0., 1.]]) Example 3 - Integer Arrays from mlxtend.preprocessing import one_hot y = [0, 1, 2, 1, 2] one_hot(y, dtype='int') array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]]) Example 4 - Arbitrary Numbers of Class Labels from mlxtend.preprocessing import one_hot y = [0, 1, 2, 1, 2] one_hot(y, num_labels=10) array([[ 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.], [ 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.]]) API one_hot(y, num_labels='auto', dtype='float') One-hot encoding of class labels Parameters y : array-like, shape = [n_classlabels] Python list or numpy array consisting of class labels. num_labels : int or 'auto' Number of unique labels in the class label array. Infers the number of unique labels from the input array if set to 'auto'. dtype : str NumPy array type (float, float32, float64) of the output array. Returns ary : numpy.ndarray, shape = [n_classlabels] One-hot encoded array, where each sample is represented as a row vector in the returned array. 
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/one_hot/","title":"One hot encoding"},{"location":"user_guide/preprocessing/one-hot_encoding/#one-hot-encoding","text":"A function that performs one-hot encoding for class labels. from mlxtend.preprocessing import one_hot","title":"One-Hot Encoding"},{"location":"user_guide/preprocessing/one-hot_encoding/#overview","text":"Typical supervised machine learning algorithms for classifications assume that the class labels are nominal (a special case of categorical where no order is implied). A typical example of an nominal feature would be \"color\" since we can't say (in most applications) that \"orange > blue > red\". The one_hot function provides a simple interface to convert class label integers into a so-called one-hot array, where each unique label is represented as a column in the new array. For example, let's assume we have 5 data points from 3 different classes: 0, 1, and 2. y = [0, # sample 1, class 0 1, # sample 2, class 1 0, # sample 3, class 0 2, # sample 4, class 2 2] # sample 5, class 2 After one-hot encoding, we then obtain the following array (note that the index position of the \"1\" in each row denotes the class label of this sample): y = [[1, 0, 0], # sample 1, class 0 [0, 1, 0], # sample 2, class 1 [1, 0, 0], # sample 3, class 0 [0, 0, 1], # sample 4, class 2 [0, 0, 1] # sample 5, class 2 ])","title":"Overview"},{"location":"user_guide/preprocessing/one-hot_encoding/#example-1-defaults","text":"from mlxtend.preprocessing import one_hot import numpy as np y = np.array([0, 1, 2, 1, 2]) one_hot(y) array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.], [ 0., 1., 0.], [ 0., 0., 1.]])","title":"Example 1 - Defaults"},{"location":"user_guide/preprocessing/one-hot_encoding/#example-2-python-lists","text":"from mlxtend.preprocessing import one_hot y = [0, 1, 2, 1, 2] one_hot(y) array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.], [ 0., 1., 0.], [ 0., 0., 1.]])","title":"Example 2 - Python Lists"},{"location":"user_guide/preprocessing/one-hot_encoding/#example-3-integer-arrays","text":"from mlxtend.preprocessing import one_hot y = [0, 1, 2, 1, 2] one_hot(y, dtype='int') array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]])","title":"Example 3 - Integer Arrays"},{"location":"user_guide/preprocessing/one-hot_encoding/#example-4-arbitrary-numbers-of-class-labels","text":"from mlxtend.preprocessing import one_hot y = [0, 1, 2, 1, 2] one_hot(y, num_labels=10) array([[ 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.], [ 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.]])","title":"Example 4 - Arbitrary Numbers of Class Labels"},{"location":"user_guide/preprocessing/one-hot_encoding/#api","text":"one_hot(y, num_labels='auto', dtype='float') One-hot encoding of class labels Parameters y : array-like, shape = [n_classlabels] Python list or numpy array consisting of class labels. num_labels : int or 'auto' Number of unique labels in the class label array. Infers the number of unique labels from the input array if set to 'auto'. dtype : str NumPy array type (float, float32, float64) of the output array. Returns ary : numpy.ndarray, shape = [n_classlabels] One-hot encoded array, where each sample is represented as a row vector in the returned array. 
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/one_hot/","title":"API"},{"location":"user_guide/preprocessing/shuffle_arrays_unison/","text":"Shuffle Arrays in Unison A function for NumPy arrays in unison. from mlxtend.preprocessing import shuffle_arrays_unison Example 1 - Scaling a Pandas DataFrame import numpy as np from mlxtend.preprocessing import shuffle_arrays_unison X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) y = np.array([1, 2, 3]) print('X:\\n%s' % X) print('y:\\n%s' % y) X: [[1 2 3] [4 5 6] [7 8 9]] y: [1 2 3] X2, y2 = shuffle_arrays_unison(arrays=[X, y], random_seed=3) print('X2:\\n%s' % X2) print('y2:\\n%s' % y2) X2: [[4 5 6] [1 2 3] [7 8 9]] y2: [2 1 3] API shuffle_arrays_unison(arrays, random_seed=None) Shuffle NumPy arrays in unison. Parameters arrays : array-like, shape = [n_arrays] A list of NumPy arrays. random_seed : int (default: None) Sets the random state. Returns shuffled_arrays : A list of NumPy arrays after shuffling. Examples >>> import numpy as np >>> from mlxtend.preprocessing import shuffle_arrays_unison >>> X1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> y1 = np.array([1, 2, 3]) >>> X2, y2 = shuffle_arrays_unison(arrays=[X1, y1], random_seed=3) >>> assert(X2.all() == np.array([[4, 5, 6], [1, 2, 3], [7, 8, 9]]).all()) >>> assert(y2.all() == np.array([2, 1, 3]).all()) >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/shuffle_arrays_unison/","title":"Shuffle Arrays in Unison"},{"location":"user_guide/preprocessing/shuffle_arrays_unison/#shuffle-arrays-in-unison","text":"A function for NumPy arrays in unison. from mlxtend.preprocessing import shuffle_arrays_unison","title":"Shuffle Arrays in Unison"},{"location":"user_guide/preprocessing/shuffle_arrays_unison/#example-1-scaling-a-pandas-dataframe","text":"import numpy as np from mlxtend.preprocessing import shuffle_arrays_unison X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) y = np.array([1, 2, 3]) print('X:\\n%s' % X) print('y:\\n%s' % y) X: [[1 2 3] [4 5 6] [7 8 9]] y: [1 2 3] X2, y2 = shuffle_arrays_unison(arrays=[X, y], random_seed=3) print('X2:\\n%s' % X2) print('y2:\\n%s' % y2) X2: [[4 5 6] [1 2 3] [7 8 9]] y2: [2 1 3]","title":"Example 1 - Scaling a Pandas DataFrame"},{"location":"user_guide/preprocessing/shuffle_arrays_unison/#api","text":"shuffle_arrays_unison(arrays, random_seed=None) Shuffle NumPy arrays in unison. Parameters arrays : array-like, shape = [n_arrays] A list of NumPy arrays. random_seed : int (default: None) Sets the random state. Returns shuffled_arrays : A list of NumPy arrays after shuffling. Examples >>> import numpy as np >>> from mlxtend.preprocessing import shuffle_arrays_unison >>> X1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> y1 = np.array([1, 2, 3]) >>> X2, y2 = shuffle_arrays_unison(arrays=[X1, y1], random_seed=3) >>> assert(X2.all() == np.array([[4, 5, 6], [1, 2, 3], [7, 8, 9]]).all()) >>> assert(y2.all() == np.array([2, 1, 3]).all()) >>> For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/shuffle_arrays_unison/","title":"API"},{"location":"user_guide/preprocessing/standardize/","text":"Standardize A function that performs column-based standardization on a NumPy array. from mlxtend.preprocessing import standardize Overview The result of standardization (or Z-score normalization) is that the features will be rescaled so that they'll have the properties of a standard normal distribution with \\mu = 0 and \\sigma = 1 . 
where \\mu is the mean (average) and \\sigma is the standard deviation from the mean; standard scores (also called z scores) of the samples are calculated as z=\\frac{x-\\mu}{\\sigma}. Standardizing the features so that they are centered around 0 with a standard deviation of 1 is not only important if we are comparing measurements that have different units, but it is also a general requirement for the optimal performance of many machine learning algorithms. One family of algorithms that is scale-invariant encompasses tree-based learning algorithms. Let's take the general CART decision tree algorithm. Without going into much depth regarding information gain and impurity measures, we can think of the decision as \"is feature x_i >= some_val?\" Intuitively, we can see that it really doesn't matter on which scale this feature is (centimeters, Fahrenheit, a standardized scale -- it really doesn't matter). Some examples of algorithms where feature scaling matters are: k-nearest neighbors with an Euclidean distance measure if want all features to contribute equally k-means (see k-nearest neighbors) logistic regression, SVMs, perceptrons, neural networks etc. if you are using gradient descent/ascent-based optimization, otherwise some weights will update much faster than others linear discriminant analysis, principal component analysis, kernel principal component analysis since you want to find directions of maximizing the variance (under the constraints that those directions/eigenvectors/principal components are orthogonal); you want to have features on the same scale since you'd emphasize variables on \"larger measurement scales\" more. There are many more cases than I can possibly list here ... I always recommend you to think about the algorithm and what it's doing, and then it typically becomes obvious whether we want to scale your features or not. In addition, we'd also want to think about whether we want to \"standardize\" or \"normalize\" (here: scaling to [0, 1] range) our data. Some algorithms assume that our data is centered at 0. For example, if we initialize the weights of a small multi-layer perceptron with tanh activation units to 0 or small random values centered around zero, we want to update the model weights \"equally.\" As a rule of thumb I'd say: When in doubt, just standardize the data, it shouldn't hurt. Example 1 - Standardize a Pandas DataFrame import pandas as pd s1 = pd.Series([1, 2, 3, 4, 5, 6], index=(range(6))) s2 = pd.Series([10, 9, 8, 7, 6, 5], index=(range(6))) df = pd.DataFrame(s1, columns=['s1']) df['s2'] = s2 df s1 s2 0 1 10 1 2 9 2 3 8 3 4 7 4 5 6 5 6 5 from mlxtend.preprocessing import standardize standardize(df, columns=['s1', 's2']) s1 s2 0 -1.46385 1.46385 1 -0.87831 0.87831 2 -0.29277 0.29277 3 0.29277 -0.29277 4 0.87831 -0.87831 5 1.46385 -1.46385 Example 2 - Standardize a NumPy Array import numpy as np X = np.array([[1, 10], [2, 9], [3, 8], [4, 7], [5, 6], [6, 5]]) X array([[ 1, 10], [ 2, 9], [ 3, 8], [ 4, 7], [ 5, 6], [ 6, 5]]) from mlxtend.preprocessing import standardize standardize(X, columns=[0, 1]) array([[-1.46385011, 1.46385011], [-0.87831007, 0.87831007], [-0.29277002, 0.29277002], [ 0.29277002, -0.29277002], [ 0.87831007, -0.87831007], [ 1.46385011, -1.46385011]]) Example 3 - Re-using parameters In machine learning contexts, it is desired to re-use the parameters that have been obtained from a training set to scale new, future data (including the independent test set). 
By setting return_params=True , the standardize function returns a second object, a parameter dictionary containing the column means and standard deviations that can be re-used by passing it to the params parameter in subsequent function calls. import numpy as np from mlxtend.preprocessing import standardize X_train = np.array([[1, 10], [4, 7], [3, 8]]) X_test = np.array([[1, 2], [3, 4], [5, 6]]) X_train_std, params = standardize(X_train, columns=[0, 1], return_params=True) X_train_std array([[-1.33630621, 1.33630621], [ 1.06904497, -1.06904497], [ 0.26726124, -0.26726124]]) params {'avgs': array([ 2.66666667, 8.33333333]), 'stds': array([ 1.24721913, 1.24721913])} X_test_std = standardize(X_test, columns=[0, 1], params=params) X_test_std array([[-1.33630621, -5.0779636 ], [ 0.26726124, -3.47439614], [ 1.87082869, -1.87082869]]) API standardize(array, columns=None, ddof=0, return_params=False, params=None) Standardize columns in pandas DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] (default: None) Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] If None, standardizes all columns. ddof : int (default: 0) Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. return_params : bool (default: False) If set to True, a dictionary is returned in addition to the standardized array. The parameter dictionary contains the column means ('avgs') and standard deviations ('stds') of the individual columns. params : dict (default: None) A dictionary with column means and standard deviations as returned by the standardize function if return_params was set to True. If a params dictionary is provided, the standardize function will use these instead of computing them from the current array. Notes If all values in a given column are the same, these values are all set to 0.0 . The standard deviation in the parameters dictionary is consequently set to 1.0 to avoid dividing by zero. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with standardized columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/standardize/\",\"title\":\"Standardize\"},{\"location\":\"user_guide/preprocessing/standardize/#standardize\",\"text\":\"A function that performs column-based standardization on a NumPy array. from mlxtend.preprocessing import standardize\",\"title\":\"Standardize\"},{\"location\":\"user_guide/preprocessing/standardize/#overview\",\"text\":\"The result of standardization (or Z-score normalization) is that the features will be rescaled so that they'll have the properties of a standard normal distribution with \mu = 0 and \sigma = 1 , where \mu is the mean (average) and \sigma is the standard deviation from the mean; standard scores (also called z scores) of the samples are calculated as z = \frac{x - \mu}{\sigma}. Standardizing the features so that they are centered around 0 with a standard deviation of 1 is not only important if we are comparing measurements that have different units, but it is also a general requirement for the optimal performance of many machine learning algorithms. One family of algorithms that is scale-invariant encompasses tree-based learning algorithms. Let's take the general CART decision tree algorithm.
{"location":"user_guide/preprocessing/standardize/#standardize","text":"A function that performs column-based standardization on a NumPy array. from mlxtend.preprocessing import standardize","title":"Standardize"},{"location":"user_guide/preprocessing/standardize/#overview","text":"The result of standardization (or Z-score normalization) is that the features will be rescaled so that they'll have the properties of a standard normal distribution with \\mu = 0 and \\sigma = 1 . where \\mu is the mean (average) and \\sigma is the standard deviation from the mean; standard scores (also called z scores) of the samples are calculated as z=\\frac{x-\\mu}{\\sigma}. Standardizing the features so that they are centered around 0 with a standard deviation of 1 is not only important if we are comparing measurements that have different units, but it is also a general requirement for the optimal performance of many machine learning algorithms. One family of algorithms that is scale-invariant encompasses tree-based learning algorithms. Let's take the general CART decision tree algorithm. Without going into much depth regarding information gain and impurity measures, we can think of the decision as \"is feature x_i >= some_val?\" Intuitively, it doesn't matter on which scale this feature is measured (centimeters, Fahrenheit, or a standardized scale). Some examples of algorithms where feature scaling matters are: k-nearest neighbors with a Euclidean distance measure, if we want all features to contribute equally; k-means (see k-nearest neighbors); logistic regression, SVMs, perceptrons, neural networks, etc., if you are using gradient descent/ascent-based optimization, because otherwise some weights will update much faster than others; linear discriminant analysis, principal component analysis, and kernel principal component analysis, since you want to find the directions that maximize the variance (under the constraint that those directions/eigenvectors/principal components are orthogonal), and you want the features on the same scale since you would otherwise emphasize variables on \"larger measurement scales\" more. There are many more cases than I can possibly list here ... I always recommend thinking about the algorithm and what it's doing; then it typically becomes obvious whether you want to scale your features or not. In addition, you'd also want to think about whether you want to \"standardize\" or \"normalize\" (here: scaling to the [0, 1] range) your data. Some algorithms assume that our data is centered at 0. For example, if we initialize the weights of a small multi-layer perceptron with tanh activation units to 0 or small random values centered around zero, we want to update the model weights \"equally.\" As a rule of thumb I'd say: when in doubt, just standardize the data; it shouldn't hurt.","title":"Overview"},{"location":"user_guide/preprocessing/standardize/#example-1-standardize-a-pandas-dataframe","text":"import pandas as pd s1 = pd.Series([1, 2, 3, 4, 5, 6], index=(range(6))) s2 = pd.Series([10, 9, 8, 7, 6, 5], index=(range(6))) df = pd.DataFrame(s1, columns=['s1']) df['s2'] = s2 df s1 s2 0 1 10 1 2 9 2 3 8 3 4 7 4 5 6 5 6 5 from mlxtend.preprocessing import standardize standardize(df, columns=['s1', 's2']) s1 s2 0 -1.46385 1.46385 1 -0.87831 0.87831 2 -0.29277 0.29277 3 0.29277 -0.29277 4 0.87831 -0.87831 5 1.46385 -1.46385","title":"Example 1 - Standardize a Pandas DataFrame"},{"location":"user_guide/preprocessing/standardize/#example-2-standardize-a-numpy-array","text":"import numpy as np X = np.array([[1, 10], [2, 9], [3, 8], [4, 7], [5, 6], [6, 5]]) X array([[ 1, 10], [ 2, 9], [ 3, 8], [ 4, 7], [ 5, 6], [ 6, 5]]) from mlxtend.preprocessing import standardize standardize(X, columns=[0, 1]) array([[-1.46385011, 1.46385011], [-0.87831007, 0.87831007], [-0.29277002, 0.29277002], [ 0.29277002, -0.29277002], [ 0.87831007, -0.87831007], [ 1.46385011, -1.46385011]])","title":"Example 2 - Standardize a NumPy Array"},{"location":"user_guide/preprocessing/standardize/#example-3-re-using-parameters","text":"In machine learning contexts, we often want to re-use the parameters that have been obtained from a training set to scale new, future data (including the independent test set). By setting return_params=True , the standardize function returns a second object, a parameter dictionary containing the column means and standard deviations, which can be re-used by feeding it to the params parameter in a subsequent function call.
import numpy as np from mlxtend.preprocessing import standardize X_train = np.array([[1, 10], [4, 7], [3, 8]]) X_test = np.array([[1, 2], [3, 4], [5, 6]]) X_train_std, params = standardize(X_train, columns=[0, 1], return_params=True) X_train_std array([[-1.33630621, 1.33630621], [ 1.06904497, -1.06904497], [ 0.26726124, -0.26726124]]) params {'avgs': array([ 2.66666667, 8.33333333]), 'stds': array([ 1.24721913, 1.24721913])} X_test_std = standardize(X_test, columns=[0, 1], params=params) X_test_std array([[-1.33630621, -5.0779636 ], [ 0.26726124, -3.47439614], [ 1.87082869, -1.87082869]])","title":"Example 3 - Re-using parameters"},{"location":"user_guide/preprocessing/standardize/#api","text":"standardize(array, columns=None, ddof=0, return_params=False, params=None) Standardize columns in pandas DataFrames. Parameters array : pandas DataFrame or NumPy ndarray, shape = [n_rows, n_columns]. columns : array-like, shape = [n_columns] (default: None) Array-like with column names, e.g., ['col1', 'col2', ...] or column indices [0, 2, 4, ...] If None, standardizes all columns. ddof : int (default: 0) Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. return_params : bool (default: False) If set to True, a dictionary is returned in addition to the standardized array. The parameter dictionary contains the column means ('avgs') and standard deviations ('stds') of the individual columns. params : dict (default: None) A dictionary with column means and standard deviations as returned by the standardize function if return_params was set to True. If a params dictionary is provided, the standardize function will use these instead of computing them from the current array. Notes If all values in a given column are the same, these values are all set to 0.0 . The standard deviation in the parameters dictionary is consequently set to 1.0 to avoid dividing by zero. Returns df_new : pandas DataFrame object. Copy of the array or DataFrame with standardized columns. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/preprocessing/standardize/","title":"API"},{"location":"user_guide/regressor/LinearRegression/","text":"LinearRegression An implementation of Ordinary Least Squares simple and multiple linear regression. from mlxtend.regressor import LinearRegression Overview Illustration of a simple linear regression model: In Ordinary Least Squares (OLS) Linear Regression, our goal is to find the line (or hyperplane) that minimizes the vertical offsets. Or in other words, we define the best-fitting line as the line that minimizes the sum of squared errors (SSE) or mean squared error (MSE) between our target variable (y) and our predicted output over all samples i in our dataset of size n . SSE = \\sum_i \\big(\\text{target}^{(i)} - \\text{output}^{(i)}\\big)^2 MSE = \\frac{1}{n} \\times SSE Now, LinearRegression implements a linear regression model for performing ordinary least squares regression using one of the following three approaches: Normal Equations Gradient Descent Stochastic Gradient Descent Normal Equations (closed-form solution) The closed-form solution should be preferred for \"smaller\" datasets where calculating (a \"costly\") matrix inverse is not a concern. For very large datasets, or datasets where the inverse of [X^T X] may not exist (the matrix is non-invertible or singular, e.g., in case of perfect multicollinearity), the gradient descent or stochastic gradient descent approaches are to be preferred. The linear function (linear regression model) is defined as: y = w_0x_0 + w_1x_1 + ... + w_mx_m = \\sum_{i=0}^{m} w_ix_i = \\mathbf{w}^T\\mathbf{x} where y is the response variable, \\mathbf{x} is an m -dimensional sample vector, and \\mathbf{w} is the weight vector (vector of coefficients). Note that w_0 represents the y-axis intercept of the model and therefore x_0=1 . Using the closed-form solution (normal equation), we compute the weights of the model as follows: \\mathbf{w} = (\\mathbf{X}^T\\mathbf{X})^{-1}\\mathbf{X}^Ty
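As a sanity check, the closed-form solution above can be computed directly with NumPy; a minimal sketch using the same toy data as the examples further below:

```python
import numpy as np

X = np.array([1.0, 2.1, 3.6, 4.2, 6.0])[:, np.newaxis]
y = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
Xb = np.hstack((np.ones((X.shape[0], 1)), X))  # prepend x_0 = 1 for the intercept w_0
# w = (X^T X)^{-1} X^T y; np.linalg.solve avoids forming the inverse explicitly.
# If X^T X is singular (e.g., perfect multicollinearity), this fails, and
# gradient descent or np.linalg.lstsq should be used instead.
w = np.linalg.solve(Xb.T @ Xb, Xb.T @ y)
print('Intercept: %.2f, Slope: %.2f' % (w[0], w[1]))  # approx. 0.25 and 0.81
```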
Gradient Descent (GD) and Stochastic Gradient Descent (SGD) See Gradient Descent and Stochastic Gradient Descent and Deriving the Gradient Descent Rule for Linear Regression and Adaline for details. Random shuffling is implemented as: for one or more epochs randomly shuffle samples in the training set for training sample i compute gradients and perform weight updates
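The shuffling scheme above, written out as a minimal sketch (plain NumPy with a hypothetical learning rate eta; not mlxtend's internal implementation):

```python
import numpy as np

rng = np.random.RandomState(0)
X = np.array([1.0, 2.1, 3.6, 4.2, 6.0])[:, np.newaxis]
y = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
w, b, eta = np.zeros(1), 0.0, 0.01
for epoch in range(100):                    # for one or more epochs
    for i in rng.permutation(len(y)):       # randomly shuffle samples
        error = float(X[i] @ w + b) - y[i]  # prediction error for sample i
        w -= eta * error * X[i]             # gradient step for the weight
        b -= eta * error                    # gradient step for the bias
print('Intercept: %.2f, Slope: %.2f' % (b, w[0]))
```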
References F. Galton. Regression towards mediocrity in hereditary stature . Journal of the Anthropological Institute of Great Britain and Ireland, pages 246\u2013263, 1886. A. I. Khuri. Introduction to linear regression analysis , by Douglas C. Montgomery, Elizabeth A. Peck, G. Geoffrey Vining. International Statistical Review, 81(2):318\u2013319, 2013. D. S. G. Pollock. The Classical Linear Regression Model . Example 1 - Closed Form Solution import numpy as np import matplotlib.pyplot as plt from mlxtend.regressor import LinearRegression X = np.array([ 1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis] y = np.array([ 1.0, 2.0, 3.0, 4.0, 5.0]) ne_lr = LinearRegression(minibatches=None) ne_lr.fit(X, y) print('Intercept: %.2f' % ne_lr.b_) print('Slope: %.2f' % ne_lr.w_[0]) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') plt.plot(X, model.predict(X), color='red') return lin_regplot(X, y, ne_lr) plt.show() Intercept: 0.25 Slope: 0.81 Example 2 - Gradient Descent import numpy as np import matplotlib.pyplot as plt from mlxtend.regressor import LinearRegression X = np.array([ 1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis] y = np.array([ 1.0, 2.0, 3.0, 4.0, 5.0]) gd_lr = LinearRegression(eta=0.005, epochs=100, minibatches=1, random_seed=123, print_progress=3) gd_lr.fit(X, y) print('Intercept: %.2f' % gd_lr.b_) print('Slope: %.2f' % gd_lr.w_[0]) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') plt.plot(X, model.predict(X), color='red') return lin_regplot(X, y, gd_lr) plt.show() Iteration: 100/100 | Cost 0.08 | Elapsed: 0:00:00 | ETA: 0:00:000 Intercept: 0.22 Slope: 0.82 # Visualizing the cost to check for convergence and plotting the linear model: plt.plot(range(1, gd_lr.epochs+1), gd_lr.cost_) plt.xlabel('Epochs') plt.ylabel('Cost') plt.ylim([0, 0.2]) plt.tight_layout() plt.show() Example 3 - Stochastic Gradient Descent import numpy as np import matplotlib.pyplot as plt from mlxtend.regressor import LinearRegression X = np.array([ 1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis] y = np.array([ 1.0, 2.0, 3.0, 4.0, 5.0]) sgd_lr = LinearRegression(eta=0.01, epochs=100, random_seed=0, minibatches=len(y)) sgd_lr.fit(X, y) print('Intercept: %.2f' % sgd_lr.b_) print('Slope: %.2f' % sgd_lr.w_[0]) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') plt.plot(X, model.predict(X), color='red') return lin_regplot(X, y, sgd_lr) plt.show() Intercept: 0.24 Slope: 0.82 plt.plot(range(1, sgd_lr.epochs+1), sgd_lr.cost_) plt.xlabel('Epochs') plt.ylabel('Cost') plt.ylim([0, 0.2]) plt.tight_layout() plt.show() Example 4 - Stochastic Gradient Descent with Minibatches import numpy as np import matplotlib.pyplot as plt from mlxtend.regressor import LinearRegression X = np.array([ 1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis] y = np.array([ 1.0, 2.0, 3.0, 4.0, 5.0]) sgd_lr = LinearRegression(eta=0.01, epochs=100, random_seed=0, minibatches=3) sgd_lr.fit(X, y) print('Intercept: %.2f' % sgd_lr.b_) print('Slope: %.2f' % sgd_lr.w_[0]) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') plt.plot(X, model.predict(X), color='red') return lin_regplot(X, y, sgd_lr) plt.show() Intercept: 0.24 Slope: 0.82 plt.plot(range(1, sgd_lr.epochs+1), sgd_lr.cost_) plt.xlabel('Epochs') plt.ylabel('Cost') plt.ylim([0, 0.2]) plt.tight_layout() plt.show() API LinearRegression(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) Ordinary least squares linear regression. Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent learning If 1 < minibatches < len(y): Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch; ignored if solver='normal equation' Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/LinearRegression/ Methods fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self . Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py Author: Gael Varoquaux gael.varoquaux@normalesup.org License: BSD 3 clause
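One detail from the fit() description above that the examples do not show is init_params=False, which continues training from the current weights instead of re-initializing them; a short sketch:

```python
import numpy as np
from mlxtend.regressor import LinearRegression

X = np.array([1.0, 2.1, 3.6, 4.2, 6.0])[:, np.newaxis]
y = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
gd_lr = LinearRegression(eta=0.005, epochs=50, minibatches=1, random_seed=123)
gd_lr.fit(X, y)                     # first 50 epochs
gd_lr.fit(X, y, init_params=False)  # 50 more epochs, starting from the fitted weights
print('Intercept: %.2f' % gd_lr.b_)
print('Slope: %.2f' % gd_lr.w_[0])
```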
","title":"LinearRegression"},{"location":"user_guide/regressor/LinearRegression/#linearregression","text":"An implementation of Ordinary Least Squares simple and multiple linear regression. from mlxtend.regressor import LinearRegression","title":"LinearRegression"},{"location":"user_guide/regressor/LinearRegression/#overview","text":"Illustration of a simple linear regression model: In Ordinary Least Squares (OLS) Linear Regression, our goal is to find the line (or hyperplane) that minimizes the vertical offsets. Or in other words, we define the best-fitting line as the line that minimizes the sum of squared errors (SSE) or mean squared error (MSE) between our target variable (y) and our predicted output over all samples i in our dataset of size n . SSE = \\sum_i \\big(\\text{target}^{(i)} - \\text{output}^{(i)}\\big)^2 MSE = \\frac{1}{n} \\times SSE Now, LinearRegression implements a linear regression model for performing ordinary least squares regression using one of the following three approaches: Normal Equations Gradient Descent Stochastic Gradient Descent","title":"Overview"},{"location":"user_guide/regressor/LinearRegression/#normal-equations-closed-form-solution","text":"The closed-form solution should be preferred for \"smaller\" datasets where calculating (a \"costly\") matrix inverse is not a concern. For very large datasets, or datasets where the inverse of [X^T X] may not exist (the matrix is non-invertible or singular, e.g., in case of perfect multicollinearity), the gradient descent or stochastic gradient descent approaches are to be preferred. The linear function (linear regression model) is defined as: y = w_0x_0 + w_1x_1 + ... + w_mx_m = \\sum_{i=0}^{m} w_ix_i = \\mathbf{w}^T\\mathbf{x} where y is the response variable, \\mathbf{x} is an m -dimensional sample vector, and \\mathbf{w} is the weight vector (vector of coefficients). Note that w_0 represents the y-axis intercept of the model and therefore x_0=1 . Using the closed-form solution (normal equation), we compute the weights of the model as follows: \\mathbf{w} = (\\mathbf{X}^T\\mathbf{X})^{-1}\\mathbf{X}^Ty","title":"Normal Equations (closed-form solution)"},{"location":"user_guide/regressor/LinearRegression/#gradient-descent-gd-and-stochastic-gradient-descent-sgd","text":"See Gradient Descent and Stochastic Gradient Descent and Deriving the Gradient Descent Rule for Linear Regression and Adaline for details. Random shuffling is implemented as: for one or more epochs randomly shuffle samples in the training set for training sample i compute gradients and perform weight updates","title":"Gradient Descent (GD) and Stochastic Gradient Descent (SGD)"},{"location":"user_guide/regressor/LinearRegression/#references","text":"F. Galton. Regression towards mediocrity in hereditary stature . Journal of the Anthropological Institute of Great Britain and Ireland, pages 246\u2013263, 1886. A. I. Khuri. Introduction to linear regression analysis , by Douglas C. Montgomery, Elizabeth A. Peck, G. Geoffrey Vining. International Statistical Review, 81(2):318\u2013319, 2013. D. S. G. Pollock. The Classical Linear Regression Model .","title":"References"},
{"location":"user_guide/regressor/LinearRegression/#example-1-closed-form-solution","text":"import numpy as np import matplotlib.pyplot as plt from mlxtend.regressor import LinearRegression X = np.array([ 1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis] y = np.array([ 1.0, 2.0, 3.0, 4.0, 5.0]) ne_lr = LinearRegression(minibatches=None) ne_lr.fit(X, y) print('Intercept: %.2f' % ne_lr.b_) print('Slope: %.2f' % ne_lr.w_[0]) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') plt.plot(X, model.predict(X), color='red') return lin_regplot(X, y, ne_lr) plt.show() Intercept: 0.25 Slope: 0.81","title":"Example 1 - Closed Form Solution"},{"location":"user_guide/regressor/LinearRegression/#example-2-gradient-descent","text":"import numpy as np import matplotlib.pyplot as plt from mlxtend.regressor import LinearRegression X = np.array([ 1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis] y = np.array([ 1.0, 2.0, 3.0, 4.0, 5.0]) gd_lr = LinearRegression(eta=0.005, epochs=100, minibatches=1, random_seed=123, print_progress=3) gd_lr.fit(X, y) print('Intercept: %.2f' % gd_lr.b_) print('Slope: %.2f' % gd_lr.w_[0]) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') plt.plot(X, model.predict(X), color='red') return lin_regplot(X, y, gd_lr) plt.show() Iteration: 100/100 | Cost 0.08 | Elapsed: 0:00:00 | ETA: 0:00:000 Intercept: 0.22 Slope: 0.82 # Visualizing the cost to check for convergence and plotting the linear model: plt.plot(range(1, gd_lr.epochs+1), gd_lr.cost_) plt.xlabel('Epochs') plt.ylabel('Cost') plt.ylim([0, 0.2]) plt.tight_layout() plt.show()","title":"Example 2 - Gradient Descent"},{"location":"user_guide/regressor/LinearRegression/#example-3-stochastic-gradient-descent","text":"import numpy as np import matplotlib.pyplot as plt from mlxtend.regressor import LinearRegression X = np.array([ 1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis] y = np.array([ 1.0, 2.0, 3.0, 4.0, 5.0]) sgd_lr = LinearRegression(eta=0.01, epochs=100, random_seed=0, minibatches=len(y)) sgd_lr.fit(X, y) print('Intercept: %.2f' % sgd_lr.b_) print('Slope: %.2f' % sgd_lr.w_[0]) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') plt.plot(X, model.predict(X), color='red') return lin_regplot(X, y, sgd_lr) plt.show() Intercept: 0.24 Slope: 0.82 plt.plot(range(1, sgd_lr.epochs+1), sgd_lr.cost_) plt.xlabel('Epochs') plt.ylabel('Cost') plt.ylim([0, 0.2]) plt.tight_layout() plt.show()","title":"Example 3 - Stochastic Gradient Descent"},{"location":"user_guide/regressor/LinearRegression/#example-4-stochastic-gradient-descent-with-minibatches","text":"import numpy as np import matplotlib.pyplot as plt from mlxtend.regressor import LinearRegression X = np.array([ 1.0, 2.1, 3.6, 4.2, 6])[:, np.newaxis] y = np.array([ 1.0, 2.0, 3.0, 4.0, 5.0]) sgd_lr = LinearRegression(eta=0.01, epochs=100, random_seed=0, minibatches=3) sgd_lr.fit(X, y) print('Intercept: %.2f' % sgd_lr.b_) print('Slope: %.2f' % sgd_lr.w_[0]) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') plt.plot(X, model.predict(X), color='red') return lin_regplot(X, y, sgd_lr) plt.show() Intercept: 0.24 Slope: 0.82 plt.plot(range(1, sgd_lr.epochs+1), sgd_lr.cost_) plt.xlabel('Epochs') plt.ylabel('Cost') plt.ylim([0, 0.2]) plt.tight_layout() plt.show()","title":"Example 4 - Stochastic Gradient Descent with Minibatches"},{"location":"user_guide/regressor/LinearRegression/#api","text":"LinearRegression(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0) Ordinary least squares linear regression.
Parameters eta : float (default: 0.01) Learning rate (between 0.0 and 1.0) epochs : int (default: 50) Passes over the training dataset. Prior to each epoch, the dataset is shuffled if minibatches > 1 to prevent cycles in stochastic gradient descent. minibatches : int (default: None) The number of minibatches for gradient-based optimization. If None: Normal Equations (closed-form solution) If 1: Gradient Descent learning If len(y): Stochastic Gradient Descent learning If 1 < minibatches < len(y): Minibatch learning random_seed : int (default: None) Set random state for shuffling and initializing the weights. print_progress : int (default: 0) Prints progress in fitting to stderr if not solver='normal equation' 0: No output 1: Epochs elapsed and cost 2: 1 plus time elapsed 3: 2 plus estimated time until completion Attributes w_ : 2d-array, shape={n_features, 1} Model weights after fitting. b_ : 1d-array, shape={1,} Bias unit after fitting. cost_ : list Sum of squared errors after each epoch; ignored if solver='normal equation' Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/LinearRegression/","title":"API"},{"location":"user_guide/regressor/LinearRegression/#methods","text":"fit(X, y, init_params=True) Learn model from training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. init_params : bool (default: True) Re-initializes model parameters prior to fitting. Set False to continue training with weights from a previous model fitting. Returns self : object get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"Methods"},{"location":"user_guide/regressor/LinearRegression/#author-gael-varoquaux-gaelvaroquauxnormalesuporg","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"user_guide/regressor/LinearRegression/#license-bsd-3-clause","text":"predict(X) Predict targets from X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns target_values : array-like, shape = [n_samples] Predicted target values. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.
Returns self . Adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py","title":"License: BSD 3 clause"},{"location":"user_guide/regressor/LinearRegression/#author-gael-varoquaux-gaelvaroquauxnormalesuporg_1","text":"","title":"Author: Gael Varoquaux gael.varoquaux@normalesup.org"},{"location":"user_guide/regressor/LinearRegression/#license-bsd-3-clause_1","text":"","title":"License: BSD 3 clause"},{"location":"user_guide/regressor/StackingCVRegressor/","text":"StackingCVRegressor An ensemble-learning meta-regressor for stacking regression from mlxtend.regressor import StackingCVRegressor Overview Stacking is an ensemble learning technique to combine multiple regression models via a meta-regressor. The StackingCVRegressor extends the standard stacking algorithm (implemented as StackingRegressor ) using out-of-fold predictions to prepare the input data for the level-2 regressor. In the standard stacking procedure, the first-level regressors are fit to the same training set that is used to prepare the inputs for the second-level regressor, which may lead to overfitting. The StackingCVRegressor , however, uses the concept of out-of-fold predictions: the dataset is split into k folds, and in k successive rounds, k-1 folds are used to fit the first-level regressors. In each round, the first-level regressors are then applied to the remaining subset that was not used for model fitting in that iteration. The resulting predictions are then stacked and provided -- as input data -- to the second-level regressor. After the training of the StackingCVRegressor , the first-level regressors are fit to the entire dataset for optimal predictions. References Breiman, Leo. \" Stacked regressions. \" Machine learning 24.1 (1996): 49-64. Analogous implementation: StackingCVClassifier Example 1: Boston Housing Data Predictions In this example we evaluate some basic prediction models on the Boston housing dataset and see how the R^2 and MSE scores are affected by combining the models with StackingCVRegressor . The code output below demonstrates that the stacked model performs best on this dataset -- slightly better than the best single regression model. from mlxtend.regressor import StackingCVRegressor from sklearn.datasets import load_boston from sklearn.svm import SVR from sklearn.linear_model import Lasso from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import cross_val_score import numpy as np RANDOM_SEED = 42 X, y = load_boston(return_X_y=True) svr = SVR(kernel='linear') lasso = Lasso() rf = RandomForestRegressor(n_estimators=5, random_state=RANDOM_SEED) # The StackingCVRegressor uses scikit-learn's check_cv # internally, which doesn't support a random seed.
Thus # NumPy's random seed needs to be specified explicitly for # deterministic behavior np.random.seed(RANDOM_SEED) stack = StackingCVRegressor(regressors=(svr, lasso, rf), meta_regressor=lasso) print('5-fold cross validation scores:\\n') for clf, label in zip([svr, lasso, rf, stack], ['SVM', 'Lasso', 'Random Forest', 'StackingCVRegressor']): scores = cross_val_score(clf, X, y, cv=5) print(\"R^2 Score: %0.2f (+/- %0.2f) [%s]\" % ( scores.mean(), scores.std(), label)) 5-fold cross validation scores: R^2 Score: 0.45 (+/- 0.29) [SVM] R^2 Score: 0.43 (+/- 0.14) [Lasso] R^2 Score: 0.52 (+/- 0.28) [Random Forest] R^2 Score: 0.58 (+/- 0.24) [StackingCVRegressor] # The StackingCVRegressor uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed needs to be specified explicitly for # deterministic behavior np.random.seed(RANDOM_SEED) stack = StackingCVRegressor(regressors=(svr, lasso, rf), meta_regressor=lasso) print('5-fold cross validation scores:\\n') for clf, label in zip([svr, lasso, rf, stack], ['SVM', 'Lasso', 'Random Forest', 'StackingCVRegressor']): scores = cross_val_score(clf, X, y, cv=5, scoring='neg_mean_squared_error') print(\"Neg. MSE Score: %0.2f (+/- %0.2f) [%s]\" % ( scores.mean(), scores.std(), label)) 5-fold cross validation scores: Neg. MSE Score: -33.69 (+/- 22.36) [SVM] Neg. MSE Score: -35.53 (+/- 16.99) [Lasso] Neg. MSE Score: -27.32 (+/- 16.62) [Random Forest] Neg. MSE Score: -25.64 (+/- 18.11) [StackingCVRegressor] Example 2: GridSearchCV with Stacking In this second example we demonstrate how StackingCVRegressor works in combination with GridSearchCV . The stack still allows tuning hyperparameters of the base and meta models! To set up a parameter grid for scikit-learn's GridSearch , we simply provide the estimator's names in the parameter grid -- in the special case of the meta-regressor, we append the 'meta-' prefix. from mlxtend.regressor import StackingCVRegressor from sklearn.datasets import load_boston from sklearn.linear_model import Lasso from sklearn.linear_model import Ridge from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import GridSearchCV import numpy as np RANDOM_SEED = 42 X, y = load_boston(return_X_y=True) ridge = Ridge() lasso = Lasso() rf = RandomForestRegressor(random_state=RANDOM_SEED) # The StackingCVRegressor uses scikit-learn's check_cv # internally, which doesn't support a random seed.
Thus # NumPy's random seed needs to be specified explicitly for # deterministic behavior np.random.seed(RANDOM_SEED) stack = StackingCVRegressor(regressors=(lasso, ridge), meta_regressor=rf, use_features_in_secondary=True) grid = GridSearchCV( estimator=stack, param_grid={ 'lasso__alpha': [x/5.0 for x in range(1, 10)], 'ridge__alpha': [x/20.0 for x in range(1, 10)], 'meta-randomforestregressor__n_estimators': [10, 100] }, cv=5, refit=True ) grid.fit(X, y) print(\"Best: %f using %s\" % (grid.best_score_, grid.best_params_)) Best: 0.673590 using {'lasso__alpha': 0.4, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.3} cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) if r > 10: break print('...') print('Best parameters: %s' % grid.best_params_) print('R^2: %.2f' % grid.best_score_) 0.622 +/- 0.10 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.05} 0.649 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.1} 0.650 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.15} 0.667 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.2} 0.629 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.25} 0.663 +/- 0.08 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.3} 0.633 +/- 0.08 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.35} 0.637 +/- 0.08 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.4} 0.649 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.45} 0.653 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 100, 'ridge__alpha': 0.05} 0.648 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 100, 'ridge__alpha': 0.1} 0.645 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 100, 'ridge__alpha': 0.15} ... Best parameters: {'lasso__alpha': 0.4, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.3} R^2: 0.67 Note The StackingCVRegressor also enables grid search over the regressors argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both different regressors and regressor parameters at the same time. For instance, while the following parameter dictionary works params = {'randomforestregressor__n_estimators': [1, 100], 'regressors': [(regr1, regr1, regr1), (regr2, regr3)]} it will use the instance settings of regr1 , regr2 , and regr3 and not overwrite them with the 'n_estimators' settings from 'randomforestregressor__n_estimators': [1, 100] . API StackingCVRegressor(regressors, meta_regressor, cv=5, shuffle=True, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A 'Stacking Cross-Validation' regressor for scikit-learn estimators. New in mlxtend v0.7.0 Notes The StackingCVRegressor uses scikit-learn's check_cv internally, which doesn't support a random seed.
Thus NumPy's random seed needs to be specified explicitly for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVRegressor Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingCVRegressor will fit clones of these original regressors that will be stored in the class attribute self.regr_ . meta_regressor : object The meta-regressor to be fitted on the ensemble of regressors cv : int, cross-validation generator or iterable, optional (default: 5) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - integer, to specify the number of folds in a KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use KFold cross-validation use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. shuffle : bool (default: True) If True, and the cv argument is integer, the training data will be shuffled at fitting stage prior to cross-validation. If the cv argument is a specific cross validation technique, this argument is omitted. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor are stored in the self.train_meta_features_ array, which can be accessed after calling fit . refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Attributes train_meta_features : numpy array, shape = [n_samples, n_regressors] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingCVRegressor/ Methods fit(X, y, groups=None, sample_weight=None) Fit ensemble regressors and the meta-regressor. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weight to each regressor in the regressors list as well as the meta_regressor. Raises an error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Get parameters for this estimator.
Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) ** 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) ** 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get an R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) w.r.t. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"StackingCVRegressor"},
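The store_train_meta_features option and the predict_meta_features method documented above can be combined to inspect what the level-2 regressor actually sees; a small sketch on synthetic data (the estimator choices here are only for illustration):

```python
import numpy as np
from mlxtend.regressor import StackingCVRegressor
from sklearn.linear_model import Lasso, Ridge
from sklearn.ensemble import RandomForestRegressor

np.random.seed(42)
X = np.random.rand(20, 3)
y = X @ np.array([1.0, 2.0, 3.0]) + 0.1 * np.random.randn(20)

stack = StackingCVRegressor(regressors=(Lasso(), Ridge()),
                            meta_regressor=RandomForestRegressor(n_estimators=10),
                            store_train_meta_features=True)
stack.fit(X, y)
# one column of out-of-fold predictions per first-level regressor:
print(stack.train_meta_features_.shape)      # (20, 2)
print(stack.predict_meta_features(X).shape)  # (20, 2)
```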
\" Machine learning 24.1 (1996): 49-64. Analogous implementation: StackingCVClassifier","title":"References"},{"location":"user_guide/regressor/StackingCVRegressor/#example-1-boston-housing-data-predictions","text":"In this example we evaluate some basic prediction models on the boston housing dataset and see how the R^2 and MSE scores are affected by combining the models with StackingCVRegressor . The code output below demonstrates that the stacked model performs the best on this dataset -- slightly better than the best single regression model. from mlxtend.regressor import StackingCVRegressor from sklearn.datasets import load_boston from sklearn.svm import SVR from sklearn.linear_model import Lasso from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import cross_val_score import numpy as np RANDOM_SEED = 42 X, y = load_boston(return_X_y=True) svr = SVR(kernel='linear') lasso = Lasso() rf = RandomForestRegressor(n_estimators=5, random_state=RANDOM_SEED) # The StackingCVRegressor uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed need to be specified explicitely for # deterministic behavior np.random.seed(RANDOM_SEED) stack = StackingCVRegressor(regressors=(svr, lasso, rf), meta_regressor=lasso) print('5-fold cross validation scores:\\n') for clf, label in zip([svr, lasso, rf, stack], ['SVM', 'Lasso', 'Random Forest', 'StackingCVRegressor']): scores = cross_val_score(clf, X, y, cv=5) print(\"R^2 Score: %0.2f (+/- %0.2f) [%s]\" % ( scores.mean(), scores.std(), label)) 5-fold cross validation scores: R^2 Score: 0.45 (+/- 0.29) [SVM] R^2 Score: 0.43 (+/- 0.14) [Lasso] R^2 Score: 0.52 (+/- 0.28) [Random Forest] R^2 Score: 0.58 (+/- 0.24) [StackingCVRegressor] # The StackingCVRegressor uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed need to be specified explicitely for # deterministic behavior np.random.seed(RANDOM_SEED) stack = StackingCVRegressor(regressors=(svr, lasso, rf), meta_regressor=lasso) print('5-fold cross validation scores:\\n') for clf, label in zip([svr, lasso, rf, stack], ['SVM', 'Lasso', 'Random Forest', 'StackingCVRegressor']): scores = cross_val_score(clf, X, y, cv=5, scoring='neg_mean_squared_error') print(\"Neg. MSE Score: %0.2f (+/- %0.2f) [%s]\" % ( scores.mean(), scores.std(), label)) 5-fold cross validation scores: Neg. MSE Score: -33.69 (+/- 22.36) [SVM] Neg. MSE Score: -35.53 (+/- 16.99) [Lasso] Neg. MSE Score: -27.32 (+/- 16.62) [Random Forest] Neg. MSE Score: -25.64 (+/- 18.11) [StackingCVRegressor]","title":"Example 1: Boston Housing Data Predictions"},{"location":"user_guide/regressor/StackingCVRegressor/#example-2-gridsearchcv-with-stacking","text":"In this second example we demonstrate how StackingCVRegressor works in combination with GridSearchCV . The stack still allows tuning hyper parameters of the base and meta models! To set up a parameter grid for scikit-learn's GridSearch , we simply provide the estimator's names in the parameter grid -- in the special case of the meta-regressor, we append the 'meta-' prefix. 
from mlxtend.regressor import StackingCVRegressor from sklearn.datasets import load_boston from sklearn.linear_model import Lasso from sklearn.linear_model import Ridge from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import GridSearchCV import numpy as np RANDOM_SEED = 42 X, y = load_boston(return_X_y=True) ridge = Ridge() lasso = Lasso() rf = RandomForestRegressor(random_state=RANDOM_SEED) # The StackingCVRegressor uses scikit-learn's check_cv # internally, which doesn't support a random seed. Thus # NumPy's random seed needs to be specified explicitly for # deterministic behavior np.random.seed(RANDOM_SEED) stack = StackingCVRegressor(regressors=(lasso, ridge), meta_regressor=rf, use_features_in_secondary=True) grid = GridSearchCV( estimator=stack, param_grid={ 'lasso__alpha': [x/5.0 for x in range(1, 10)], 'ridge__alpha': [x/20.0 for x in range(1, 10)], 'meta-randomforestregressor__n_estimators': [10, 100] }, cv=5, refit=True ) grid.fit(X, y) print(\"Best: %f using %s\" % (grid.best_score_, grid.best_params_)) Best: 0.673590 using {'lasso__alpha': 0.4, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.3} cv_keys = ('mean_test_score', 'std_test_score', 'params') for r, _ in enumerate(grid.cv_results_['mean_test_score']): print(\"%0.3f +/- %0.2f %r\" % (grid.cv_results_[cv_keys[0]][r], grid.cv_results_[cv_keys[1]][r] / 2.0, grid.cv_results_[cv_keys[2]][r])) if r > 10: break print('...') print('Best parameters: %s' % grid.best_params_) print('R^2: %.2f' % grid.best_score_) 0.622 +/- 0.10 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.05} 0.649 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.1} 0.650 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.15} 0.667 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.2} 0.629 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.25} 0.663 +/- 0.08 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.3} 0.633 +/- 0.08 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.35} 0.637 +/- 0.08 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.4} 0.649 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.45} 0.653 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 100, 'ridge__alpha': 0.05} 0.648 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 100, 'ridge__alpha': 0.1} 0.645 +/- 0.09 {'lasso__alpha': 0.2, 'meta-randomforestregressor__n_estimators': 100, 'ridge__alpha': 0.15} ... Best parameters: {'lasso__alpha': 0.4, 'meta-randomforestregressor__n_estimators': 10, 'ridge__alpha': 0.3} R^2: 0.67 Note The StackingCVRegressor also enables grid search over the regressors argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both different regressors and regressor parameters at the same time.
For instance, while the following parameter dictionary works params = {'randomforestregressor__n_estimators': [1, 100], 'regressors': [(regr1, regr1, regr1), (regr2, regr3)]} it will use the instance settings of regr1 , regr2 , and regr3 and not overwrite them with the 'n_estimators' settings from 'randomforestregressor__n_estimators': [1, 100] .","title":"Example 2: GridSearchCV with Stacking"},{"location":"user_guide/regressor/StackingCVRegressor/#api","text":"StackingCVRegressor(regressors, meta_regressor, cv=5, shuffle=True, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A 'Stacking Cross-Validation' regressor for scikit-learn estimators. New in mlxtend v0.7.0 Notes The StackingCVRegressor uses scikit-learn's check_cv internally, which doesn't support a random seed. Thus NumPy's random seed needs to be specified explicitly for deterministic behavior, for instance, by setting np.random.seed(RANDOM_SEED) prior to fitting the StackingCVRegressor Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingCVRegressor will fit clones of these original regressors that will be stored in the class attribute self.regr_ . meta_regressor : object The meta-regressor to be fitted on the ensemble of regressors cv : int, cross-validation generator or iterable, optional (default: 5) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - integer, to specify the number of folds in a KFold , - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, it will use KFold cross-validation use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. shuffle : bool (default: True) If True, and the cv argument is integer, the training data will be shuffled at fitting stage prior to cross-validation. If the cv argument is a specific cross validation technique, this argument is omitted. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor are stored in the self.train_meta_features_ array, which can be accessed after calling fit . refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Attributes train_meta_features : numpy array, shape = [n_samples, n_regressors] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingCVRegressor/","title":"API"},{"location":"user_guide/regressor/StackingCVRegressor/#methods","text":"fit(X, y, groups=None, sample_weight=None) Fit ensemble regressors and the meta-regressor. Parameters X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features.
y : numpy array, shape = [n_samples] Target values. groups : numpy array/None, shape = [n_samples] The group that each sample belongs to. This is used by specific folding strategies such as GroupKFold() sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weight to each regressor in the regressors list as well as the meta_regressor. Raises an error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) ** 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) ** 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get an R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) w.r.t. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"Methods"},{"location":"user_guide/regressor/StackingRegressor/","text":"StackingRegressor An ensemble-learning meta-regressor for stacking regression from mlxtend.regressor import StackingRegressor Overview Stacking regression is an ensemble learning technique to combine multiple regression models via a meta-regressor. The individual regression models are trained based on the complete training set; then, the meta-regressor is fitted based on the outputs -- meta-features -- of the individual regression models in the ensemble.
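The two-stage procedure described above can be written out in a few lines of plain scikit-learn (a sketch of the concept, not the StackingRegressor implementation; the model choices mirror Example 1 below):

```python
import numpy as np
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.svm import SVR

np.random.seed(1)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()

# level 1: fit each regressor on the complete training set
level1 = [SVR(kernel='linear'), LinearRegression(), Ridge(random_state=1)]
meta_features = np.column_stack([r.fit(X, y).predict(X) for r in level1])
# level 2: fit the meta-regressor on the stacked predictions (meta-features)
meta_regressor = SVR(kernel='rbf').fit(meta_features, y)
print(meta_regressor.predict(meta_features)[:3])
```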
\" Machine learning 24.1 (1996): 49-64. Example 1 - Simple Stacked Regression from mlxtend.regressor import StackingRegressor from mlxtend.data import boston_housing_data from sklearn.linear_model import LinearRegression from sklearn.linear_model import Ridge from sklearn.svm import SVR import matplotlib.pyplot as plt import numpy as np # Generating a sample dataset np.random.seed(1) X = np.sort(5 * np.random.rand(40, 1), axis=0) y = np.sin(X).ravel() y[::5] += 3 * (0.5 - np.random.rand(8)) # Initializing models lr = LinearRegression() svr_lin = SVR(kernel='linear') ridge = Ridge(random_state=1) svr_rbf = SVR(kernel='rbf') stregr = StackingRegressor(regressors=[svr_lin, lr, ridge], meta_regressor=svr_rbf) # Training the stacking classifier stregr.fit(X, y) stregr.predict(X) # Evaluate and visualize the fit print(\"Mean Squared Error: %.4f\" % np.mean((stregr.predict(X) - y) ** 2)) print('Variance Score: %.4f' % stregr.score(X, y)) with plt.style.context(('seaborn-whitegrid')): plt.scatter(X, y, c='lightgray') plt.plot(X, stregr.predict(X), c='darkgreen', lw=2) plt.show() Mean Squared Error: 0.2039 Variance Score: 0.7049 stregr StackingRegressor(meta_regressor=SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.1, gamma='auto', kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False), regressors=[SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.1, gamma='auto', kernel='linear', max_iter=-1, shrinking=True, tol=0.001, verbose=False), LinearRegression(copy_X=True, fit_intercept=True, n_jobs=1, normalize=False), Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None, normalize=False, random_state=1, solver='auto', tol=0.001)], verbose=0) Example 2 - Stacked Regression and GridSearch To set up a parameter grid for scikit-learn's GridSearch , we simply provide the estimator's names in the parameter grid -- in the special case of the meta-regressor, we append the 'meta-' prefix. 
## Example 2 - Stacked Regression and GridSearch

To set up a parameter grid for scikit-learn's `GridSearchCV`, we simply provide the estimators' names in the parameter grid -- in the special case of the meta-regressor, we append the `'meta-'` prefix.

```python
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso

# Initializing models
lr = LinearRegression()
svr_lin = SVR(kernel='linear')
ridge = Ridge(random_state=1)
lasso = Lasso(random_state=1)
svr_rbf = SVR(kernel='rbf')
regressors = [svr_lin, lr, ridge, lasso]
stregr = StackingRegressor(regressors=regressors,
                           meta_regressor=svr_rbf)

params = {'lasso__alpha': [0.1, 1.0, 10.0],
          'ridge__alpha': [0.1, 1.0, 10.0],
          'svr__C': [0.1, 1.0, 10.0],
          'meta-svr__C': [0.1, 1.0, 10.0, 100.0],
          'meta-svr__gamma': [0.1, 1.0, 10.0]}

grid = GridSearchCV(estimator=stregr,
                    param_grid=params,
                    cv=5,
                    refit=True)
grid.fit(X, y)

# `grid_scores_` was deprecated in scikit-learn 0.18 and removed in 0.20;
# `cv_results_` holds the same information.
for param_set, mean_score, std_score in zip(grid.cv_results_['params'],
                                            grid.cv_results_['mean_test_score'],
                                            grid.cv_results_['std_test_score']):
    print("%0.3f +/- %0.2f %r" % (mean_score, std_score / 2.0, param_set))
```

    -9.810 +/- 6.86 {'lasso__alpha': 0.1, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1}
    -9.591 +/- 6.67 {'lasso__alpha': 0.1, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1}
    -9.591 +/- 6.67 {'lasso__alpha': 0.1, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1}
    [... 324 parameter combinations in total; output abridged ...]
    -5.429 +/- 3.35 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0}
    -5.418 +/- 3.35 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0}
```python
# Evaluate and visualize the fit
print("Mean Squared Error: %.4f" % np.mean((grid.predict(X) - y) ** 2))
print('Variance Score: %.4f' % grid.score(X, y))

with plt.style.context(('seaborn-whitegrid')):
    plt.scatter(X, y, c='lightgray')
    plt.plot(X, grid.predict(X), c='darkgreen', lw=2)
    plt.show()
```

    Mean Squared Error: 0.1844
    Variance Score: 0.7331

**Note**

The `StackingRegressor` also enables grid search over the `regressors` argument. However, due to the current implementation of `GridSearchCV` in scikit-learn, it is not possible to search over both different regressors and regressor parameters at the same time. For instance, while the following parameter dictionary works,

```python
params = {'randomforestregressor__n_estimators': [1, 100],
          'regressors': [(regr1, regr1, regr1), (regr2, regr3)]}
```

it will use the instance settings of `regr1`, `regr2`, and `regr3` and not overwrite them with the `'n_estimators'` settings from `'randomforestregressor__n_estimators': [1, 100]` (see the sketch below).
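To make the note above concrete, the following is a minimal, hypothetical sketch of searching over the `regressors` argument only, with each candidate's settings fixed at instantiation; `regr1`-`regr3` are placeholder names, and `X`, `y`, `GridSearchCV`, `SVR`, and `StackingRegressor` are reused from the examples above.

```python
# Hypothetical sketch: grid search over the `regressors` list itself.
# Each forest's n_estimators is fixed up front because it cannot be tuned
# in the same grid as the regressor list (see the note above).
from sklearn.ensemble import RandomForestRegressor

regr1 = RandomForestRegressor(n_estimators=1, random_state=1)
regr2 = RandomForestRegressor(n_estimators=100, random_state=1)
regr3 = RandomForestRegressor(n_estimators=100, max_depth=3, random_state=1)

stregr = StackingRegressor(regressors=[regr1],
                           meta_regressor=SVR(kernel='rbf'))

grid = GridSearchCV(estimator=stregr,
                    param_grid={'regressors': [(regr1,), (regr2,), (regr3,)]},
                    cv=5)
grid.fit(X, y)
print(grid.best_params_)
```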
## API

*StackingRegressor(regressors, meta_regressor, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, refit=True)*

A Stacking regressor for scikit-learn estimators for regression.

**Parameters**

- `regressors` : array-like, shape = [n_regressors]
    A list of regressors. Invoking the `fit` method on the `StackingRegressor` will fit clones of those original regressors that will be stored in the class attribute `self.regr_`.

- `meta_regressor` : object
    The meta-regressor to be fitted on the ensemble of regressors.

- `verbose` : int, optional (default=0)
    Controls the verbosity of the building process.
    - `verbose=0` (default): Prints nothing
    - `verbose=1`: Prints the number & name of the regressor being fitted
    - `verbose=2`: Prints info about the parameters of the regressor being fitted
    - `verbose>2`: Changes `verbose` param of the underlying regressor to `self.verbose - 2`

- `use_features_in_secondary` : bool (default: False)
    If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors.

- `store_train_meta_features` : bool (default: False)
    If True, the meta-features computed from the training data used for fitting the meta-regressor are stored in the `self.train_meta_features_` array, which can be accessed after calling `fit`.

- `refit` : bool (default: True)
    Clones the regressors for stacking regression if True (default); otherwise, the original regressors are used and will be refitted on the dataset upon calling the `fit` method. Setting `refit=False` is recommended if you are working with estimators that support the scikit-learn fit/predict API but are not compatible with scikit-learn's `clone` function.

**Attributes**

- `regr_` : list, shape=[n_regressors]
    Fitted regressors (clones of the original regressors)

- `meta_regr_` : estimator
    Fitted meta-regressor (clone of the original meta-estimator)

- `coef_` : array-like, shape = [n_features]
    Model coefficients of the fitted meta-estimator

- `intercept_` : float
    Intercept of the fitted meta-estimator

- `train_meta_features` : numpy array, shape = [n_samples, len(self.regressors)]
    Meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors.

**Examples**

For usage examples, please see
http://rasbt.github.io/mlxtend/user_guide/regressor/StackingRegressor/

**Methods**

*fit(X, y, sample_weight=None)*

Learn weight coefficients from training data for each regressor.

**Parameters**

- `X` : {array-like, sparse matrix}, shape = [n_samples, n_features]
    Training vectors, where n_samples is the number of samples and n_features is the number of features.

- `y` : array-like, shape = [n_samples] or [n_samples, n_targets]
    Target values.

- `sample_weight` : array-like, shape = [n_samples], optional
    Sample weights passed as `sample_weights` to each regressor in the regressors list as well as the meta_regressor. Raises an error if some regressor does not support `sample_weight` in the `fit()` method.

**Returns**

- `self` : object

*fit_transform(X, y=None, **fit_params)*

Fit to data, then transform it. Fits the transformer to X and y with optional parameters fit_params and returns a transformed version of X.

**Parameters**

- `X` : numpy array of shape [n_samples, n_features]
    Training set.

- `y` : numpy array of shape [n_samples]
    Target values.

**Returns**

- `X_new` : numpy array of shape [n_samples, n_features_new]
    Transformed array.

*get_params(deep=True)*

Return estimator parameter names for GridSearch support.

*predict(X)*

Predict target values for X.

**Parameters**

- `X` : {array-like, sparse matrix}, shape = [n_samples, n_features]
    Training vectors, where n_samples is the number of samples and n_features is the number of features.

**Returns**

- `y_target` : array-like, shape = [n_samples] or [n_samples, n_targets]
    Predicted target values.

*predict_meta_features(X)*

Get meta-features of test data.

**Parameters**

- `X` : numpy array, shape = [n_samples, n_features]
    Test vectors, where n_samples is the number of samples and n_features is the number of features.

**Returns**

- `meta-features` : numpy array, shape = [n_samples, len(self.regressors)]
    Meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors.

*score(X, y, sample_weight=None)*

Returns the coefficient of determination R^2 of the prediction.

The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares `((y_true - y_pred) ** 2).sum()` and v is the total sum of squares `((y_true - y_true.mean()) ** 2).sum()`. The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get an R^2 score of 0.0.

**Parameters**

- `X` : array-like, shape = (n_samples, n_features)
    Test samples.

- `y` : array-like, shape = (n_samples) or (n_samples, n_outputs)
    True values for X.
- `sample_weight` : array-like, shape = [n_samples], optional
    Sample weights.

**Returns**

- `score` : float
    R^2 of self.predict(X) wrt. y.

*set_params(**params)*

Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form `<component>__<parameter>` so that it's possible to update each component of a nested object.

**Returns**

- `self`

**Properties**

- `coef_`

- `intercept_`
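The nested `<component>__<parameter>` names accepted by `set_params` are exactly the ones used in the grid search of Example 2 (GridSearchCV calls `set_params` internally for every grid point). A minimal sketch, reusing the estimator instances from Example 2 and assuming nothing beyond the documented GridSearch support:

```python
# Minimal sketch: updating nested parameters by name, mirroring what
# GridSearchCV does internally for each point in the parameter grid.
stregr = StackingRegressor(regressors=[svr_lin, lr, ridge, lasso],
                           meta_regressor=svr_rbf)
stregr.set_params(**{'ridge__alpha': 5.0, 'meta-svr__C': 10.0})
print(stregr.get_params()['ridge__alpha'])   # expected: 5.0
```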
-5.957 +/- 4.14 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -5.957 +/- 4.14 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -0.286 +/- 0.21 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.118 +/- 0.13 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.118 +/- 0.13 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.290 +/- 0.21 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.122 +/- 0.13 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.122 +/- 0.13 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.263 +/- 0.19 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.162 +/- 0.14 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.161 +/- 0.14 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -1.386 +/- 0.96 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -2.040 +/- 1.58 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -2.040 +/- 1.58 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -1.361 +/- 0.94 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -2.029 +/- 1.57 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -2.029 +/- 1.57 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -1.182 +/- 0.79 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -1.873 +/- 1.43 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -1.874 +/- 1.44 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -1.775 +/- 1.14 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -0.902 +/- 0.32 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -0.903 +/- 0.32 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -1.812 +/- 1.17 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -0.923 +/- 0.33 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -0.922 +/- 0.33 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -2.085 +/- 1.44 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -1.080 +/- 0.47 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -1.079 +/- 0.47 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -1.208 +/- 1.22 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 
10.0, 'meta-svr__gamma': 1.0} -0.865 +/- 0.87 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -0.864 +/- 0.87 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -1.218 +/- 1.23 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -0.881 +/- 0.89 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -0.877 +/- 0.89 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -1.369 +/- 1.39 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -1.031 +/- 1.05 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -1.034 +/- 1.05 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -0.532 +/- 0.38 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.878 +/- 0.57 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.878 +/- 0.57 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.524 +/- 0.37 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.847 +/- 0.55 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.848 +/- 0.55 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.445 +/- 0.33 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.669 +/- 0.43 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.670 +/- 0.43 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -2.682 +/- 2.59 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -3.012 +/- 2.92 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -3.012 +/- 2.92 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -2.688 +/- 2.59 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -3.022 +/- 2.93 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -3.019 +/- 2.92 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -2.586 +/- 2.48 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -2.771 +/- 2.68 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -2.772 +/- 2.68 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -1.901 +/- 1.93 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.385 +/- 1.42 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.385 +/- 1.42 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.933 +/- 1.96 {'lasso__alpha': 
1.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.388 +/- 1.42 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.387 +/- 1.42 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -2.159 +/- 2.17 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.421 +/- 1.45 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.421 +/- 1.45 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -2.620 +/- 1.60 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -8.549 +/- 5.97 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -8.543 +/- 5.97 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -2.607 +/- 1.54 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -7.940 +/- 5.42 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -7.962 +/- 5.45 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -2.615 +/- 1.28 {'lasso__alpha': 1.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -5.429 +/- 3.35 {'lasso__alpha': 1.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -5.418 +/- 3.35 {'lasso__alpha': 1.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -9.941 +/- 6.89 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -9.716 +/- 6.70 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -9.716 +/- 6.70 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -9.953 +/- 6.90 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -9.725 +/- 6.71 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -9.725 +/- 6.71 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -10.035 +/- 6.93 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -9.793 +/- 6.74 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -9.793 +/- 6.74 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 0.1} -5.238 +/- 4.24 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 1.0} -4.240 +/- 3.29 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 1.0} -4.240 +/- 3.29 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 1.0} -5.277 +/- 4.28 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 1.0} -4.267 +/- 3.31 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 1.0} -4.267 +/- 3.31 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 
0.1, 'meta-svr__gamma': 1.0} -5.584 +/- 4.56 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 1.0} -4.480 +/- 3.48 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 1.0} -4.480 +/- 3.48 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 1.0} -4.649 +/- 2.88 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -5.364 +/- 3.56 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -5.364 +/- 3.56 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -4.625 +/- 2.86 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -5.343 +/- 3.55 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -5.343 +/- 3.55 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -4.430 +/- 2.69 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -5.172 +/- 3.39 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -5.172 +/- 3.39 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 0.1, 'meta-svr__gamma': 10.0} -6.131 +/- 4.33 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -5.607 +/- 3.90 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -5.607 +/- 3.90 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -6.150 +/- 4.34 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -5.653 +/- 3.94 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -5.653 +/- 3.94 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -6.300 +/- 4.44 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -5.957 +/- 4.14 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -5.957 +/- 4.14 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 0.1} -0.286 +/- 0.21 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.118 +/- 0.13 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.118 +/- 0.13 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.290 +/- 0.21 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.122 +/- 0.13 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.122 +/- 0.13 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.263 +/- 0.19 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.162 +/- 0.14 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -0.161 +/- 0.14 {'lasso__alpha': 10.0, 
'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 1.0} -1.386 +/- 0.96 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -2.040 +/- 1.58 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -2.040 +/- 1.58 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -1.361 +/- 0.94 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -2.029 +/- 1.57 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -2.029 +/- 1.57 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -1.182 +/- 0.79 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -1.873 +/- 1.43 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -1.874 +/- 1.44 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 1.0, 'meta-svr__gamma': 10.0} -1.775 +/- 1.14 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -0.902 +/- 0.32 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -0.903 +/- 0.32 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -1.812 +/- 1.17 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -0.923 +/- 0.33 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -0.922 +/- 0.33 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -2.085 +/- 1.44 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -1.080 +/- 0.47 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -1.079 +/- 0.47 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 0.1} -1.208 +/- 1.22 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -0.865 +/- 0.87 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -0.864 +/- 0.87 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -1.218 +/- 1.23 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -0.881 +/- 0.89 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -0.877 +/- 0.89 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -1.369 +/- 1.39 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -1.031 +/- 1.05 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -1.034 +/- 1.05 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 1.0} -0.532 +/- 0.38 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.878 +/- 0.57 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 
10.0, 'meta-svr__gamma': 10.0} -0.878 +/- 0.57 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.524 +/- 0.37 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.847 +/- 0.55 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.848 +/- 0.55 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.445 +/- 0.33 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.669 +/- 0.43 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -0.670 +/- 0.43 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 10.0, 'meta-svr__gamma': 10.0} -2.682 +/- 2.59 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -3.012 +/- 2.92 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -3.012 +/- 2.92 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -2.688 +/- 2.59 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -3.022 +/- 2.93 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -3.019 +/- 2.92 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -2.586 +/- 2.48 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -2.771 +/- 2.68 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -2.772 +/- 2.68 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 0.1} -1.901 +/- 1.93 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.385 +/- 1.42 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.385 +/- 1.42 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.933 +/- 1.96 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.388 +/- 1.42 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.387 +/- 1.42 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -2.159 +/- 2.17 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.421 +/- 1.45 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -1.421 +/- 1.45 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 1.0} -2.620 +/- 1.60 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -8.549 +/- 5.97 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -8.543 +/- 5.97 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 0.1, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -2.607 +/- 1.54 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 
'meta-svr__gamma': 10.0} -7.940 +/- 5.42 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -7.962 +/- 5.45 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 1.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -2.615 +/- 1.28 {'lasso__alpha': 10.0, 'svr__C': 0.1, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -5.429 +/- 3.35 {'lasso__alpha': 10.0, 'svr__C': 1.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} -5.418 +/- 3.35 {'lasso__alpha': 10.0, 'svr__C': 10.0, 'ridge__alpha': 10.0, 'meta-svr__C': 100.0, 'meta-svr__gamma': 10.0} /Users/Sebastian/miniconda3/lib/python3.5/site-packages/sklearn/model_selection/_search.py:662: DeprecationWarning: The grid_scores_ attribute was deprecated in version 0.18 in favor of the more elaborate cv_results_ attribute. The grid_scores_ attribute will not be available from 0.20 DeprecationWarning) # Evaluate and visualize the fit print(\"Mean Squared Error: %.4f\" % np.mean((grid.predict(X) - y) ** 2)) print('Variance Score: %.4f' % grid.score(X, y)) with plt.style.context(('seaborn-whitegrid')): plt.scatter(X, y, c='lightgray') plt.plot(X, grid.predict(X), c='darkgreen', lw=2) plt.show() Mean Squared Error: 0.1844 Variance Score: 0.7331 Note The StackingRegressor also enables grid search over the regressors argument. However, due to the current implementation of GridSearchCV in scikit-learn, it is not possible to search over both, differenct classifiers and classifier parameters at the same time. For instance, while the following parameter dictionary works params = {'randomforestregressor__n_estimators': [1, 100], 'regressors': [(regr1, regr1, regr1), (regr2, regr3)]} it will use the instance settings of regr1 , regr2 , and regr3 and not overwrite it with the 'n_estimators' settings from 'randomforestregressor__n_estimators': [1, 100] .","title":"Example 2 - Stacked Regression and GridSearch"},{"location":"user_guide/regressor/StackingRegressor/#api","text":"StackingRegressor(regressors, meta_regressor, verbose=0, use_features_in_secondary=False, store_train_meta_features=False, refit=True) A Stacking regressor for scikit-learn estimators for regression. Parameters regressors : array-like, shape = [n_regressors] A list of regressors. Invoking the fit method on the StackingRegressor will fit clones of those original regressors that will be stored in the class attribute self.regr_ . meta_regressor : object The meta-regressor to be fitted on the ensemble of regressors verbose : int, optional (default=0) Controls the verbosity of the building process. - verbose=0 (default): Prints nothing - verbose=1 : Prints the number & name of the regressor being fitted - verbose=2 : Prints info about the parameters of the regressor being fitted - verbose>2 : Changes verbose param of the underlying regressor to self.verbose - 2 use_features_in_secondary : bool (default: False) If True, the meta-regressor will be trained both on the predictions of the original regressors and the original dataset. If False, the meta-regressor will be trained only on the predictions of the original regressors. store_train_meta_features : bool (default: False) If True, the meta-features computed from the training data used for fitting the meta-regressor stored in the self.train_meta_features_ array, which can be accessed after calling fit . 
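To make the note above concrete, here is a minimal, hedged sketch of a grid search that includes the regressors argument itself. The estimator choices (Lasso, Ridge, SVR) are placeholders, and the 'meta-ridge__alpha' key is an assumption that follows the 'meta-svr__C' naming convention used in the example above; this code is not from the original documentation.

```python
# Illustrative sketch only; see the lead-in above for the assumptions made.
from sklearn.linear_model import Lasso, Ridge
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from mlxtend.regressor import StackingRegressor

regr1 = Lasso(alpha=0.1)
regr2 = Ridge(alpha=1.0)
regr3 = SVR(kernel='rbf')

stack = StackingRegressor(regressors=[regr1, regr2, regr3],
                          meta_regressor=Ridge())

# The ensemble composition is itself a searchable parameter. Per the note
# above, each candidate tuple is used with the instance settings of
# regr1/regr2/regr3 as constructed here; per-regressor grids do not
# propagate into these tuples.
params = {'regressors': [(regr1, regr2), (regr1, regr2, regr3)],
          'meta-ridge__alpha': [0.1, 1.0, 10.0]}  # assumed key, see lead-in

grid = GridSearchCV(estimator=stack,
                    param_grid=params,
                    cv=5,
                    refit=True)
# grid.fit(X, y)  # X, y: the training data from the example above
```

Keeping the candidate tuples built from pre-configured instances makes the caveat explicit: their hyperparameters come from construction time, not from the grid.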
Attributes regr_ : list, shape=[n_regressors] Fitted regressors (clones of the original regressors) meta_regr_ : estimator Fitted meta-regressor (clone of the original meta-estimator) coef_ : array-like, shape = [n_features] Model coefficients of the fitted meta-estimator intercept_ : float Intercept of the fitted meta-estimator train_meta_features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for training data, where n_samples is the number of samples in training data and len(self.regressors) is the number of regressors. refit : bool (default: True) Clones the regressors for stacking regression if True (default) or else uses the original ones, which will be refitted on the dataset upon calling the fit method. Setting refit=False is recommended if you are working with estimators that support the scikit-learn fit/predict API interface but are not compatible with scikit-learn's clone function. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/regressor/StackingRegressor/","title":"API"},{"location":"user_guide/regressor/StackingRegressor/#methods","text":"fit(X, y, sample_weight=None) Learn weight coefficients from training data for each regressor. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values. sample_weight : array-like, shape = [n_samples], optional Sample weights passed as sample_weights to each regressor in the regressors list as well as the meta_regressor. Raises error if some regressor does not support sample_weight in the fit() method. Returns self : object fit_transform(X, y=None, **fit_params) Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns X_new : numpy array of shape [n_samples, n_features_new] Transformed array. get_params(deep=True) Return estimator parameter names for GridSearch support. predict(X) Predict target values for X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns y_target : array-like, shape = [n_samples] or [n_samples, n_targets] Predicted target values. predict_meta_features(X) Get meta-features of test-data. Parameters X : numpy array, shape = [n_samples, n_features] Test vectors, where n_samples is the number of samples and n_features is the number of features. Returns meta-features : numpy array, shape = [n_samples, len(self.regressors)] meta-features for test data, where n_samples is the number of samples in test data and len(self.regressors) is the number of regressors. score(X, y, sample_weight=None) Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) ** 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) ** 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get an R^2 score of 0.0. Parameters X : array-like, shape = (n_samples, n_features) Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns score : float R^2 of self.predict(X) wrt. y. set_params(**params) Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object. Returns self","title":"Methods"},{"location":"user_guide/regressor/StackingRegressor/#properties","text":"coef_ None intercept_ None","title":"Properties"},{"location":"user_guide/text/generalize_names/","text":"Generalize Names A function that converts a name into a general format (all lowercase). from mlxtend.text import generalize_names Overview A function that converts a name into a general format (all lowercase), which is useful if data is collected from different sources and is supposed to be compared or merged based on name identifiers. E.g., if names are stored in a pandas DataFrame column, the apply function can be used to generalize names: df['name'] = df['name'].apply(generalize_names) References - Example 1 - Defaults from mlxtend.text import generalize_names generalize_names('Pozo, Jos\u00e9 \u00c1ngel') 'pozo j' generalize_names('Jos\u00e9 Pozo') 'pozo j' generalize_names('Jos\u00e9 \u00c1ngel Pozo') 'pozo j' Example 2 - Optional Parameters from mlxtend.text import generalize_names generalize_names(\"Eto'o, Samuel\", firstname_output_letters=2) 'etoo sa' generalize_names(\"Eto'o, Samuel\", firstname_output_letters=0) 'etoo' generalize_names(\"Eto'o, Samuel\", output_sep=', ') 'etoo, s' API generalize_names(name, output_sep=' ', firstname_output_letters=1) Generalize a person's first and last name. Returns a person's name in the format <last_name><separator><first name letter(s)> (all lowercase) Parameters name : str Name of the player output_sep : str (default: ' ') String for separating last name and first name in the output. firstname_output_letters : int Number of letters in the abbreviated first name. Returns gen_name : str The generalized name. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names/","title":"Generalize Names"},{"location":"user_guide/text/generalize_names/#generalize-names","text":"A function that converts a name into a general format (all lowercase). from mlxtend.text import generalize_names","title":"Generalize Names"},{"location":"user_guide/text/generalize_names/#overview","text":"A function that converts a name into a general format (all lowercase), which is useful if data is collected from different sources and is supposed to be compared or merged based on name identifiers.
E.g., if names are stored in a pandas DataFrame column, the apply function can be used to generalize names: df['name'] = df['name'].apply(generalize_names)","title":"Overview"},{"location":"user_guide/text/generalize_names/#references","text":"-","title":"References"},{"location":"user_guide/text/generalize_names/#example-1-defaults","text":"from mlxtend.text import generalize_names generalize_names('Pozo, Jos\u00e9 \u00c1ngel') 'pozo j' generalize_names('Jos\u00e9 Pozo') 'pozo j' generalize_names('Jos\u00e9 \u00c1ngel Pozo') 'pozo j'","title":"Example 1 - Defaults"},{"location":"user_guide/text/generalize_names/#example-2-optional-parameters","text":"from mlxtend.text import generalize_names generalize_names(\"Eto'o, Samuel\", firstname_output_letters=2) 'etoo sa' generalize_names(\"Eto'o, Samuel\", firstname_output_letters=0) 'etoo' generalize_names(\"Eto'o, Samuel\", output_sep=', ') 'etoo, s'","title":"Example 2 - Optional Parameters"},{"location":"user_guide/text/generalize_names/#api","text":"generalize_names(name, output_sep=' ', firstname_output_letters=1) Generalize a person's first and last name. Returns a person's name in the format <last_name><separator><first name letter(s)> (all lowercase) Parameters name : str Name of the player output_sep : str (default: ' ') String for separating last name and first name in the output. firstname_output_letters : int Number of letters in the abbreviated first name. Returns gen_name : str The generalized name. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names/","title":"API"},{"location":"user_guide/text/generalize_names_duplcheck/","text":"Generalize Names & Duplicate Checking A function that converts a name into a general format (all lowercase) in a pandas DataFrame while avoiding duplicate entries. from mlxtend.text import generalize_names_duplcheck Overview Note that using mlxtend.text.generalize_names with few firstname_output_letters can result in duplicate entries. E.g., if your dataset contains the names \"Adam Johnson\" and \"Andrew Johnson\", the default setting (i.e., 1 first name letter) will produce the generalized name \"johnson a\" in both cases. One solution is to increase the number of first name letters in the output by setting the parameter firstname_output_letters to a value larger than 1. An alternative solution is to use the generalize_names_duplcheck function if you are working with pandas DataFrames. By default, generalize_names_duplcheck will apply generalize_names to a pandas DataFrame column with the minimum number of first name letters and append as many first name letters as necessary until no duplicates are present in the given DataFrame column. An example dataset column that contains the names Samuel Eto'o, Adam Johnson, and Andrew Johnson is used in Example 1 below. References - Example 1 - Defaults Reading in a CSV file that has a 'name' column for which we want to generalize the names: Samuel Eto'o Adam Johnson Andrew Johnson import pandas as pd from io import StringIO simulated_csv = \"name,some_value\\n\"\\ \"Samuel Eto'o,1\\n\"\\ \"Adam Johnson,1\\n\"\\ \"Andrew Johnson,1\\n\" df = pd.read_csv(StringIO(simulated_csv)) df name some_value 0 Samuel Eto'o 1 1 Adam Johnson 1 2 Andrew Johnson 1 Applying generalize_names_duplcheck to generate a new DataFrame with the generalized names without duplicates: from mlxtend.text import generalize_names_duplcheck df_new = generalize_names_duplcheck(df=df, col_name='name') df_new name some_value 0 etoo s 1 1 johnson ad 1 2 johnson an 1 API generalize_names_duplcheck(df, col_name) Generalizes names and removes duplicates.
Applies mlxtend.text.generalize_names to a DataFrame with 1 first name letter by default and uses more first name letters if duplicates are detected. Parameters df : pandas.DataFrame DataFrame that contains a column to which generalize_names should be applied. col_name : str Name of the DataFrame column to which the generalize_names function should be applied. Returns df_new : pandas.DataFrame New DataFrame object where the generalize_names function has been applied without duplicates. Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names_duplcheck/","title":"Generalize Names & Duplicate Checking"},{"location":"user_guide/text/generalize_names_duplcheck/#generalize-names-duplicate-checking","text":"A function that converts a name into a general format (all lowercase) in a pandas DataFrame while avoiding duplicate entries. from mlxtend.text import generalize_names_duplcheck","title":"Generalize Names & Duplicate Checking"},{"location":"user_guide/text/generalize_names_duplcheck/#overview","text":"Note that using mlxtend.text.generalize_names with few firstname_output_letters can result in duplicate entries. E.g., if your dataset contains the names \"Adam Johnson\" and \"Andrew Johnson\", the default setting (i.e., 1 first name letter) will produce the generalized name \"johnson a\" in both cases. One solution is to increase the number of first name letters in the output by setting the parameter firstname_output_letters to a value larger than 1. An alternative solution is to use the generalize_names_duplcheck function if you are working with pandas DataFrames. By default, generalize_names_duplcheck will apply generalize_names to a pandas DataFrame column with the minimum number of first name letters and append as many first name letters as necessary until no duplicates are present in the given DataFrame column. An example dataset column that contains the names Samuel Eto'o, Adam Johnson, and Andrew Johnson is used in Example 1 below.","title":"Overview"},{"location":"user_guide/text/generalize_names_duplcheck/#references","text":"-","title":"References"},{"location":"user_guide/text/generalize_names_duplcheck/#example-1-defaults","text":"Reading in a CSV file that has a 'name' column for which we want to generalize the names: Samuel Eto'o Adam Johnson Andrew Johnson import pandas as pd from io import StringIO simulated_csv = \"name,some_value\\n\"\\ \"Samuel Eto'o,1\\n\"\\ \"Adam Johnson,1\\n\"\\ \"Andrew Johnson,1\\n\" df = pd.read_csv(StringIO(simulated_csv)) df name some_value 0 Samuel Eto'o 1 1 Adam Johnson 1 2 Andrew Johnson 1 Applying generalize_names_duplcheck to generate a new DataFrame with the generalized names without duplicates: from mlxtend.text import generalize_names_duplcheck df_new = generalize_names_duplcheck(df=df, col_name='name') df_new name some_value 0 etoo s 1 1 johnson ad 1 2 johnson an 1","title":"Example 1 - Defaults"},{"location":"user_guide/text/generalize_names_duplcheck/#api","text":"generalize_names_duplcheck(df, col_name) Generalizes names and removes duplicates. Applies mlxtend.text.generalize_names to a DataFrame with 1 first name letter by default and uses more first name letters if duplicates are detected. Parameters df : pandas.DataFrame DataFrame that contains a column to which generalize_names should be applied. col_name : str Name of the DataFrame column to which the generalize_names function should be applied. Returns df_new : pandas.DataFrame New DataFrame object where the generalize_names function has been applied without duplicates.
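The collision that generalize_names_duplcheck resolves can be seen directly with generalize_names; the outputs in the comments below follow from the behavior described in the overview and Example 1 above:

```python
from mlxtend.text import generalize_names

# With the default single first-name letter, distinct names collide:
generalize_names('Adam Johnson')    # 'johnson a'
generalize_names('Andrew Johnson')  # 'johnson a'  (duplicate)

# Widening the abbreviation manually resolves the collision;
# generalize_names_duplcheck automates exactly this per DataFrame column:
generalize_names('Adam Johnson', firstname_output_letters=2)    # 'johnson ad'
generalize_names('Andrew Johnson', firstname_output_letters=2)  # 'johnson an'
```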
Examples For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/generalize_names_duplcheck/","title":"API"},{"location":"user_guide/text/tokenizer/","text":"Tokenizer Different functions to tokenize text. from mlxtend.text import tokenizer_[type] Overview Different functions to tokenize text for natural language processing tasks, such as building a bag-of-words model for text classification. References - Example 1 - Extract Emoticons from mlxtend.text import tokenizer_emoticons tokenizer_emoticons('This :) is :( a test :-)!') [':)', ':(', ':-)'] Example 2 - Extract Words and Emoticons from mlxtend.text import tokenizer_words_and_emoticons tokenizer_words_and_emoticons('This :) is :( a test :-)!') ['this', 'is', 'a', 'test', ':)', ':(', ':-)'] API tokenizer_emoticons(text) Return emoticons from text. Examples >>> tokenizer_emoticons('This :) is :( a test :-)!') [':)', ':(', ':-)'] For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_emoticons/ tokenizer_words_and_emoticons(text) Convert text to lowercase words and emoticons. Examples >>> tokenizer_words_and_emoticons('This :) is :( a test :-)!') ['this', 'is', 'a', 'test', ':)', ':(', ':-)'] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_words_and_emoticons/","title":"Tokenizer"},{"location":"user_guide/text/tokenizer/#tokenizer","text":"Different functions to tokenize text. from mlxtend.text import tokenizer_[type]","title":"Tokenizer"},{"location":"user_guide/text/tokenizer/#overview","text":"Different functions to tokenize text for natural language processing tasks, such as building a bag-of-words model for text classification.","title":"Overview"},{"location":"user_guide/text/tokenizer/#references","text":"-","title":"References"},{"location":"user_guide/text/tokenizer/#example-1-extract-emoticons","text":"from mlxtend.text import tokenizer_emoticons tokenizer_emoticons('This :) is :( a test :-)!') [':)', ':(', ':-)']","title":"Example 1 - Extract Emoticons"},{"location":"user_guide/text/tokenizer/#example-2-extract-words-and-emoticons","text":"from mlxtend.text import tokenizer_words_and_emoticons tokenizer_words_and_emoticons('This :) is :( a test :-)!') ['this', 'is', 'a', 'test', ':)', ':(', ':-)']","title":"Example 2 - Extract Words and Emoticons"},{"location":"user_guide/text/tokenizer/#api","text":"tokenizer_emoticons(text) Return emoticons from text. Examples >>> tokenizer_emoticons('This :) is :( a test :-)!') [':)', ':(', ':-)'] For usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_emoticons/ tokenizer_words_and_emoticons(text) Convert text to lowercase words and emoticons. Examples >>> tokenizer_words_and_emoticons('This :) is :( a test :-)!') ['this', 'is', 'a', 'test', ':)', ':(', ':-)'] For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/text/tokenizer_words_and_emoticons/","title":"API"},{"location":"user_guide/utils/Counter/","text":"Counter A simple progress counter to print the number of iterations and time elapsed in a for-loop execution. from mlxtend.utils import Counter Overview The Counter class implements an object for displaying the number of iterations and time elapsed in a for-loop. Please note that the Counter was implemented for efficiency; thus, the Counter offers only very basic functionality in order to avoid relatively expensive evaluations (of if-else statements).
References - Example 1 - Counting the iterations in a for-loop from mlxtend.utils import Counter import time cnt = Counter() for i in range(20): # do some computation time.sleep(0.1) cnt.update() 20 iter | 2 sec Note that the first number displays the current iteration, and the second number shows the time elapsed after initializing the Counter . API Counter(stderr=False, start_newline=True, precision=0, name=None) Class to display the progress of for-loop iterators. Parameters stderr : bool (default: False) Prints output to sys.stderr if True; uses sys.stdout otherwise. start_newline : bool (default: True) Prepends a new line to the counter, which prevents overwriting counters if multiple counters are printed in succession. precision : int (default: 0) Sets the number of decimal places when displaying the time elapsed in seconds. name : string (default: None) Prepends the specified name before the counter to allow distinguishing between multiple counters. Attributes curr_iter : int The current iteration. start_time : float The system's time in seconds when the Counter was initialized. end_time : float The system's time in seconds when the Counter was last updated. Examples >>> cnt = Counter() >>> for i in range(20): ... # do some computation ... time.sleep(0.1) ... cnt.update() 20 iter | 2 sec >>> print('The counter was initialized.' ' %d seconds ago.' % (time.time() - cnt.start_time)) The counter was initialized 2 seconds ago >>> print('The counter was last updated' ' %d seconds ago.' % (time.time() - cnt.end_time)) The counter was last updated 0 seconds ago. For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/utils/Counter/ Methods update() Print current iteration and time elapsed.","title":"Counter"},{"location":"user_guide/utils/Counter/#counter","text":"A simple progress counter to print the number of iterations and time elapsed in a for-loop execution. from mlxtend.utils import Counter","title":"Counter"},{"location":"user_guide/utils/Counter/#overview","text":"The Counter class implements an object for displaying the number of iterations and time elapsed in a for-loop. Please note that the Counter was implemented for efficiency; thus, the Counter offers only very basic functionality in order to avoid relatively expensive evaluations (of if-else statements).","title":"Overview"},{"location":"user_guide/utils/Counter/#references","text":"-","title":"References"},{"location":"user_guide/utils/Counter/#example-1-counting-the-iterations-in-a-for-loop","text":"from mlxtend.utils import Counter import time cnt = Counter() for i in range(20): # do some computation time.sleep(0.1) cnt.update() 20 iter | 2 sec Note that the first number displays the current iteration, and the second number shows the time elapsed after initializing the Counter .","title":"Example 1 - Counting the iterations in a for-loop"},{"location":"user_guide/utils/Counter/#api","text":"Counter(stderr=False, start_newline=True, precision=0, name=None) Class to display the progress of for-loop iterators. Parameters stderr : bool (default: False) Prints output to sys.stderr if True; uses sys.stdout otherwise. start_newline : bool (default: True) Prepends a new line to the counter, which prevents overwriting counters if multiple counters are printed in succession. precision : int (default: 0) Sets the number of decimal places when displaying the time elapsed in seconds. name : string (default: None) Prepends the specified name before the counter to allow distinguishing between multiple counters.
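The name and precision parameters documented above can be combined to keep several timed loops apart. A small, hedged sketch follows; the loop bodies are placeholders, and the output format is assumed to match the 'iter | sec' lines shown in Example 1:

```python
import time
from mlxtend.utils import Counter

# name= prefixes each progress line; precision=2 prints the elapsed
# time with two decimal places instead of whole seconds.
cnt_train = Counter(name='train batches', precision=2)
for i in range(10):
    time.sleep(0.05)  # placeholder for real work
    cnt_train.update()

cnt_eval = Counter(name='eval batches', precision=2)
for i in range(5):
    time.sleep(0.05)  # placeholder for real work
    cnt_eval.update()
```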
Attributes curr_iter : int The current iteration. start_time : float The system's time in seconds when the Counter was initialized. end_time : float The system's time in seconds when the Counter was last updated. Examples >>> cnt = Counter() >>> for i in range(20): ... # do some computation ... time.sleep(0.1) ... cnt.update() 20 iter | 2 sec >>> print('The counter was initialized.' ' %d seconds ago.' % (time.time() - cnt.start_time)) The counter was initialized 2 seconds ago >>> print('The counter was last updated' ' %d seconds ago.' % (time.time() - cnt.end_time)) The counter was last updated 0 seconds ago. For more usage examples, please see http://rasbt.github.io/mlxtend/user_guide/utils/Counter/","title":"API"},{"location":"user_guide/utils/Counter/#methods","text":"update() Print current iteration and time elapsed.","title":"Methods"}]} \ No newline at end of file diff --git a/docs/_site/site/sitemap.xml b/docs/_site/site/sitemap.xml index 0bc39f9d4..46ae62fb6 100644 --- a/docs/_site/site/sitemap.xml +++ b/docs/_site/site/sitemap.xml @@ -2,517 +2,522 @@ http://rasbt.github.io/mlxtend/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/USER_GUIDE_INDEX/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/classifier/Adaline/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/classifier/EnsembleVoteClassifier/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/classifier/LogisticRegression/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/classifier/MultiLayerPerceptron/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/classifier/Perceptron/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/classifier/SoftmaxRegression/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/classifier/StackingClassifier/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/classifier/StackingCVClassifier/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/cluster/Kmeans/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/data/autompg_data/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/data/boston_housing_data/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/data/iris_data/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/data/loadlocal_mnist/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/data/make_multiplexer_dataset/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/data/mnist_data/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/data/three_blobs_data/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/data/wine_data/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/bootstrap_point632_score/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/BootstrapOutOfBag/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/cochrans_q/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/combined_ftest_5x2cv/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/confusion_matrix/ - 2018-11-09 + 2018-11-22 daily 
http://rasbt.github.io/mlxtend/user_guide/evaluate/feature_importance_permutation/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/ftest/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/lift_score/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_table/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar_tables/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/mcnemar/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_5x2cv/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_kfold_cv/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/paired_ttest_resampled/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/permutation_test/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/PredefinedHoldoutSplit/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/RandomHoldoutSplit/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/evaluate/scoring/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/feature_extraction/LinearDiscriminantAnalysis/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/feature_extraction/PrincipalComponentAnalysis/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/feature_extraction/RBFKernelPCA/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/feature_selection/ColumnSelector/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/feature_selection/ExhaustiveFeatureSelector/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/feature_selection/SequentialFeatureSelector/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/file_io/find_filegroups/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/file_io/find_files/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/apriori/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/association_rules/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/general_concepts/activation-functions/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/general_concepts/gradient-optimization/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/general_concepts/linear-gradient-derivative/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/general_concepts/regularization-linear/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/image/extract_face_landmarks/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/math/num_combinations/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/math/num_permutations/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/math/vectorspace_dimensionality/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/math/vectorspace_orthonormalization/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/plotting/category_scatter/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/plotting/checkerboard_plot/ 
- 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/plotting/ecdf/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/plotting/enrichment_plot/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/plotting/plot_confusion_matrix/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/plotting/plot_decision_regions/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/plotting/plot_learning_curves/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/plotting/plot_linear_regression/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/plotting/plot_sequential_feature_selection/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/plotting/scatterplotmatrix/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/plotting/stacked_barplot/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/preprocessing/CopyTransformer/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/preprocessing/DenseTransformer/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/preprocessing/MeanCenterer/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/preprocessing/minmax_scaling/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/preprocessing/one-hot_encoding/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/preprocessing/shuffle_arrays_unison/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/preprocessing/standardize/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/preprocessing/TransactionEncoder/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/regressor/LinearRegression/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/regressor/StackingCVRegressor/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/regressor/StackingRegressor/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/text/generalize_names/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/text/generalize_names_duplcheck/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/text/tokenizer/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/user_guide/utils/Counter/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/api_subpackages/mlxtend.classifier/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/api_subpackages/mlxtend.cluster/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/api_subpackages/mlxtend.data/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/api_subpackages/mlxtend.evaluate/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/api_subpackages/mlxtend.feature_extraction/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/api_subpackages/mlxtend.feature_selection/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/api_subpackages/mlxtend.file_io/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/api_subpackages/mlxtend.frequent_patterns/ - 2018-11-09 + 2018-11-22 + daily + + + http://rasbt.github.io/mlxtend/api_subpackages/mlxtend.image/ + 2018-11-22 daily http://rasbt.github.io/mlxtend/api_subpackages/mlxtend.plotting/ - 2018-11-09 + 2018-11-22 daily 
http://rasbt.github.io/mlxtend/api_subpackages/mlxtend.preprocessing/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/api_subpackages/mlxtend.regressor/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/api_subpackages/mlxtend.text/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/api_subpackages/mlxtend.utils/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/installation/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/CHANGELOG/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/CONTRIBUTING/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/contributors/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/license/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/cite/ - 2018-11-09 + 2018-11-22 daily http://rasbt.github.io/mlxtend/discuss/ - 2018-11-09 + 2018-11-22 daily
\ No newline at end of file
diff --git a/docs/_site/site/sitemap.xml.gz b/docs/_site/site/sitemap.xml.gz
index 8ad7fc4a6ae1ef808e8237e30eadde4e2e8898f1..e358b74d3cbe9087c09748bb7818f3bb538b5997 100644
GIT binary patch
[regenerated gzip of the sitemap; binary payload omitted]
diff --git a/docs/_site/site/user_guide/classifier/Adaline/index.html b/docs/_site/site/user_guide/classifier/Adaline/index.html
index 46772856a..e7ea4fffe 100644
--- a/docs/_site/site/user_guide/classifier/Adaline/index.html
+++ b/docs/_site/site/user_guide/classifier/Adaline/index.html
@@ -805,6 +805,12 @@
+
    • + Mlxtend.image +
    • + + +
    • Mlxtend.plotting
    • @@ -897,6 +903,8 @@ Search + +
    • - Edit on GitHub + GitHub
    @@ -922,21 +930,21 @@
    @@ -1281,7 +1289,7 @@

    Methods

    diff --git a/docs/_site/site/user_guide/classifier/EnsembleVoteClassifier/index.html b/docs/_site/site/user_guide/classifier/EnsembleVoteClassifier/index.html index 887f58f33..9fa41a5a3 100644 --- a/docs/_site/site/user_guide/classifier/EnsembleVoteClassifier/index.html +++ b/docs/_site/site/user_guide/classifier/EnsembleVoteClassifier/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -923,30 +931,30 @@
  • EnsembleVoteClassifier
  • Overview
  • Majority Voting / Hard Voting
  • - +
  • Weighted Majority Vote
  • - +
  • Soft Voting
  • - +
  • References
  • - +
  • Example 1 - Classifying Iris Flowers Using Different Classification Models
  • - -
  • Plotting Decision Regions
  • +
  • Example 2 - Grid Search
  • - +
  • Example 3 - Majority voting with classifiers trained on different feature subsets
  • - -
  • Manual Approach
  • +
  • Example 5 - Using Pre-fitted Classifiers
  • - +
  • Example 6 - Ensembles of Classifiers that Operate on Different Feature Subsets
  • - +
  • Example 7 - A Note about Scikit-Learn SVMs and Soft Voting
  • - +
  • API
  • Methods
  • - +
    @@ -1687,7 +1695,7 @@

    Methods

    diff --git a/docs/_site/site/user_guide/classifier/LogisticRegression/index.html b/docs/_site/site/user_guide/classifier/LogisticRegression/index.html index 7c1cb2234..d69946e0b 100644 --- a/docs/_site/site/user_guide/classifier/LogisticRegression/index.html +++ b/docs/_site/site/user_guide/classifier/LogisticRegression/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,23 +930,23 @@
    @@ -1389,7 +1397,7 @@

    Methods

    diff --git a/docs/_site/site/user_guide/classifier/MultiLayerPerceptron/index.html b/docs/_site/site/user_guide/classifier/MultiLayerPerceptron/index.html index 53adbe879..5336f1bab 100644 --- a/docs/_site/site/user_guide/classifier/MultiLayerPerceptron/index.html +++ b/docs/_site/site/user_guide/classifier/MultiLayerPerceptron/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,19 +930,19 @@
    @@ -1320,7 +1328,7 @@

    Methods

    diff --git a/docs/_site/site/user_guide/classifier/Perceptron/index.html b/docs/_site/site/user_guide/classifier/Perceptron/index.html index f32b0d333..ff9547040 100644 --- a/docs/_site/site/user_guide/classifier/Perceptron/index.html +++ b/docs/_site/site/user_guide/classifier/Perceptron/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,15 +930,15 @@
    @@ -1200,7 +1208,7 @@

    Methods

    diff --git a/docs/_site/site/user_guide/classifier/SoftmaxRegression/index.html b/docs/_site/site/user_guide/classifier/SoftmaxRegression/index.html index d602077b8..74237b8b3 100644 --- a/docs/_site/site/user_guide/classifier/SoftmaxRegression/index.html +++ b/docs/_site/site/user_guide/classifier/SoftmaxRegression/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,16 +930,16 @@
    @@ -1396,7 +1404,7 @@

    Methods

    diff --git a/docs/_site/site/user_guide/classifier/StackingCVClassifier/index.html b/docs/_site/site/user_guide/classifier/StackingCVClassifier/index.html index f42912cfe..e9367088b 100644 --- a/docs/_site/site/user_guide/classifier/StackingCVClassifier/index.html +++ b/docs/_site/site/user_guide/classifier/StackingCVClassifier/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -923,18 +931,18 @@
  • StackingCVClassifier
  • Overview
  • References
  • - +
  • Example 1 - Simple Stacking CV Classification
  • - +
  • Example 2 - Using Probabilities as Meta-Features
  • - +
  • Example 3 - Stacked CV Classification and GridSearch
  • - +
  • Example 4 - Stacking of Classifiers that Operate on Different Feature Subsets
  • - +
  • API
  • Methods
  • - +
    @@ -1510,7 +1518,7 @@

    Methods

diff --git a/docs/_site/site/user_guide/classifier/StackingClassifier.ipynb b/docs/_site/site/user_guide/classifier/StackingClassifier.ipynb index 0a15114c6..454a2c08b 100644 --- a/docs/_site/site/user_guide/classifier/StackingClassifier.ipynb +++ b/docs/_site/site/user_guide/classifier/StackingClassifier.ipynb @@ -63,6 +63,13 @@ "![](./StackingClassifier_files/stacking_algorithm.png)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Please note that this type of Stacking is prone to overfitting due to information leakage. The related [StackingCVClassifier](StackingCVClassifier.md) does not derive the predictions for the 2nd-level classifier from the same dataset that was used for training the level-1 classifiers and is recommended instead.**" ] }, { "cell_type": "markdown", "metadata": {}, @@ -781,7 +788,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.6" + "version": "3.6.7" }, "toc": { "nav_menu": {}, diff --git a/docs/_site/site/user_guide/classifier/StackingClassifier/index.html b/docs/_site/site/user_guide/classifier/StackingClassifier/index.html index efe4411a2..a878a7b4e 100644 --- a/docs/_site/site/user_guide/classifier/StackingClassifier/index.html +++ b/docs/_site/site/user_guide/classifier/StackingClassifier/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -923,18 +931,18 @@
  • StackingClassifier
  • Overview
  • References
  • - +
  • Example 1 - Simple Stacked Classification
  • - +
  • Example 2 - Using Probabilities as Meta-Features
  • - +
  • Example 3 - Stacked Classification and GridSearch
  • - +
  • Example 4 - Stacking of Classifiers that Operate on Different Feature Subsets
  • - +
  • API
  • Methods
  • - +
    @@ -950,6 +958,7 @@

    Overview

    The algorithm can be summarized as follows (source: [1]):

    +

Please note that this type of Stacking is prone to overfitting due to information leakage. The related StackingCVClassifier does not derive the predictions for the 2nd-level classifier from the same dataset that was used for training the level-1 classifiers and is recommended instead.
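A minimal sketch of the recommended alternative (my own example, not taken from this page; it assumes only the documented mlxtend.classifier API and standard scikit-learn estimators):

```python
# StackingCVClassifier builds the level-2 training set from out-of-fold
# predictions, which avoids the information leakage described above.
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from mlxtend.classifier import StackingCVClassifier
from mlxtend.data import iris_data

X, y = iris_data()

sclf = StackingCVClassifier(
    classifiers=[KNeighborsClassifier(), GaussianNB()],
    meta_classifier=LogisticRegression(solver='liblinear', multi_class='ovr'),
    cv=5)  # 5-fold CV produces the meta-features

sclf.fit(X, y)
print(sclf.predict(X[:5]))
```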

    References

    @@ -922,16 +930,16 @@
    @@ -1131,7 +1139,7 @@

    Methods

    diff --git a/docs/_site/site/user_guide/data/autompg_data/index.html b/docs/_site/site/user_guide/data/autompg_data/index.html index f7d873d3e..70e8b226c 100644 --- a/docs/_site/site/user_guide/data/autompg_data/index.html +++ b/docs/_site/site/user_guide/data/autompg_data/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1036,7 +1044,7 @@

    API

    diff --git a/docs/_site/site/user_guide/data/boston_housing_data/index.html b/docs/_site/site/user_guide/data/boston_housing_data/index.html index c8f6c0eef..ed07b9414 100644 --- a/docs/_site/site/user_guide/data/boston_housing_data/index.html +++ b/docs/_site/site/user_guide/data/boston_housing_data/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1036,7 +1044,7 @@

    API

    diff --git a/docs/_site/site/user_guide/data/iris_data/index.html b/docs/_site/site/user_guide/data/iris_data/index.html index 27721bf32..20570e9dc 100644 --- a/docs/_site/site/user_guide/data/iris_data/index.html +++ b/docs/_site/site/user_guide/data/iris_data/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1026,7 +1034,7 @@

    API

    diff --git a/docs/_site/site/user_guide/data/loadlocal_mnist/index.html b/docs/_site/site/user_guide/data/loadlocal_mnist/index.html index 6437b4913..67b2ba139 100644 --- a/docs/_site/site/user_guide/data/loadlocal_mnist/index.html +++ b/docs/_site/site/user_guide/data/loadlocal_mnist/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,15 +930,15 @@
    @@ -1093,7 +1101,7 @@

    API

    diff --git a/docs/_site/site/user_guide/data/make_multiplexer_dataset/index.html b/docs/_site/site/user_guide/data/make_multiplexer_dataset/index.html index 4f62dc65f..dbfaa9e01 100644 --- a/docs/_site/site/user_guide/data/make_multiplexer_dataset/index.html +++ b/docs/_site/site/user_guide/data/make_multiplexer_dataset/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1056,7 +1064,7 @@

    API

    diff --git a/docs/_site/site/user_guide/data/mnist_data/index.html b/docs/_site/site/user_guide/data/mnist_data/index.html index 194156b84..52c67dfbf 100644 --- a/docs/_site/site/user_guide/data/mnist_data/index.html +++ b/docs/_site/site/user_guide/data/mnist_data/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,14 +930,14 @@
    @@ -1080,7 +1088,7 @@

    API

    diff --git a/docs/_site/site/user_guide/data/three_blobs_data/index.html b/docs/_site/site/user_guide/data/three_blobs_data/index.html index 1bd87c58f..d2e9a7e2d 100644 --- a/docs/_site/site/user_guide/data/three_blobs_data/index.html +++ b/docs/_site/site/user_guide/data/three_blobs_data/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1039,7 +1047,7 @@

    API

    diff --git a/docs/_site/site/user_guide/data/wine_data/index.html b/docs/_site/site/user_guide/data/wine_data/index.html index afc790178..d0bcc5a5a 100644 --- a/docs/_site/site/user_guide/data/wine_data/index.html +++ b/docs/_site/site/user_guide/data/wine_data/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1152,7 +1160,7 @@

    API

    diff --git a/docs/_site/site/user_guide/evaluate/BootstrapOutOfBag.ipynb b/docs/_site/site/user_guide/evaluate/BootstrapOutOfBag.ipynb index f0b5a906d..0f9c209de 100644 --- a/docs/_site/site/user_guide/evaluate/BootstrapOutOfBag.ipynb +++ b/docs/_site/site/user_guide/evaluate/BootstrapOutOfBag.ipynb @@ -350,7 +350,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.4" + "version": "3.6.7" }, "toc": { "nav_menu": {}, diff --git a/docs/_site/site/user_guide/evaluate/BootstrapOutOfBag/index.html b/docs/_site/site/user_guide/evaluate/BootstrapOutOfBag/index.html index 8e8c3256f..cbd04190a 100644 --- a/docs/_site/site/user_guide/evaluate/BootstrapOutOfBag/index.html +++ b/docs/_site/site/user_guide/evaluate/BootstrapOutOfBag/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,13 +930,13 @@
    @@ -1099,7 +1107,7 @@

    Methods

    diff --git a/docs/_site/site/user_guide/evaluate/PredefinedHoldoutSplit/index.html b/docs/_site/site/user_guide/evaluate/PredefinedHoldoutSplit/index.html index e9ddeed84..84bdcef5a 100644 --- a/docs/_site/site/user_guide/evaluate/PredefinedHoldoutSplit/index.html +++ b/docs/_site/site/user_guide/evaluate/PredefinedHoldoutSplit/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,14 +930,14 @@
    @@ -1074,7 +1082,7 @@

    Methods

    diff --git a/docs/_site/site/user_guide/evaluate/RandomHoldoutSplit/index.html b/docs/_site/site/user_guide/evaluate/RandomHoldoutSplit/index.html index 5f26ae915..4db878fd6 100644 --- a/docs/_site/site/user_guide/evaluate/RandomHoldoutSplit/index.html +++ b/docs/_site/site/user_guide/evaluate/RandomHoldoutSplit/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,14 +930,14 @@
    @@ -1081,7 +1089,7 @@

    Methods

    diff --git a/docs/_site/site/user_guide/evaluate/bootstrap/index.html b/docs/_site/site/user_guide/evaluate/bootstrap/index.html index b3cd1bb07..feeb10b64 100644 --- a/docs/_site/site/user_guide/evaluate/bootstrap/index.html +++ b/docs/_site/site/user_guide/evaluate/bootstrap/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,14 +930,14 @@
    @@ -1078,7 +1086,7 @@

    API

    diff --git a/docs/_site/site/user_guide/evaluate/bootstrap_point632_score/index.html b/docs/_site/site/user_guide/evaluate/bootstrap_point632_score/index.html index 9ea8b0658..14ca11ba2 100644 --- a/docs/_site/site/user_guide/evaluate/bootstrap_point632_score/index.html +++ b/docs/_site/site/user_guide/evaluate/bootstrap_point632_score/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,18 +930,18 @@
    @@ -1169,7 +1177,7 @@

    API

    diff --git a/docs/_site/site/user_guide/evaluate/cochrans_q/index.html b/docs/_site/site/user_guide/evaluate/cochrans_q/index.html index e2c2b5b5c..17694f29b 100644 --- a/docs/_site/site/user_guide/evaluate/cochrans_q/index.html +++ b/docs/_site/site/user_guide/evaluate/cochrans_q/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1188,7 +1196,7 @@

    API

diff --git a/docs/_site/site/user_guide/evaluate/combined_ftest_5x2cv.ipynb b/docs/_site/site/user_guide/evaluate/combined_ftest_5x2cv.ipynb index 029dcb4dd..22b7c8adc 100644 --- a/docs/_site/site/user_guide/evaluate/combined_ftest_5x2cv.ipynb +++ b/docs/_site/site/user_guide/evaluate/combined_ftest_5x2cv.ipynb @@ -42,7 +42,7 @@ "metadata": {}, "source": [ "The 5x2cv combined *F* test is a procedure for comparing the performance of two models (classifiers or regressors)\n", - "that was proposed by Alpaydin [1] as a more robust alternative to Dietterich's 5x2cv paired t-test procedure [2]. [`paired_ttest_5x2cv.md`](paired_ttest_5x2cv.md). Dietterich's 5x2cv method was in turn was designed to address shortcomings in other methods such as the resampled paired *t* test (see [`paired_ttest_resampled`](paired_ttest_resampled.md)) and the k-fold cross-validated paired *t* test (see [`paired_ttest_kfold_cv`](paired_ttest_kfold_cv.md)).\n", + "that was proposed by Alpaydin [1] as a more robust alternative to Dietterich's 5x2cv paired t-test procedure [2] (see [`paired_ttest_5x2cv`](paired_ttest_5x2cv.md)). Dietterich's 5x2cv method was in turn designed to address shortcomings in other methods such as the resampled paired *t* test (see [`paired_ttest_resampled`](paired_ttest_resampled.md)) and the k-fold cross-validated paired *t* test (see [`paired_ttest_kfold_cv`](paired_ttest_kfold_cv.md)).\n", "\n", "To explain how this method works, let's consider two estimators (e.g., classifiers) A and B. Further, we have a labeled dataset *D*. In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the 5x2cv paired *t* test, we repeat the splitting (50% training and 50% test data) 5 times. \n", "\n", @@ -54,7 +54,7 @@ "\n", "$$p^{(2)} = p^{(2)}_A - p^{(2)}_B.$$\n", "\n", - "Then, we estimate the estimate mean and variance of the differences:\n", + "Then, we estimate the mean and variance of the differences:\n", "\n", "$\\overline{p} = \\frac{p^{(1)} + p^{(2)}}{2}$\n", "\n", diff --git a/docs/_site/site/user_guide/evaluate/combined_ftest_5x2cv/index.html b/docs/_site/site/user_guide/evaluate/combined_ftest_5x2cv/index.html index b89a0a76f..59ee8fd28 100644 --- a/docs/_site/site/user_guide/evaluate/combined_ftest_5x2cv/index.html +++ b/docs/_site/site/user_guide/evaluate/combined_ftest_5x2cv/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -939,7 +947,7 @@

    5x2cv combined F test

    Overview

    The 5x2cv combined F test is a procedure for comparing the performance of two models (classifiers or regressors) -that was proposed by Alpaydin [1] as a more robust alternative to Dietterich's 5x2cv paired t-test procedure [2]. paired_ttest_5x2cv.md. Dietterich's 5x2cv method was in turn was designed to address shortcomings in other methods such as the resampled paired t test (see paired_ttest_resampled) and the k-fold cross-validated paired t test (see paired_ttest_kfold_cv).

+that was proposed by Alpaydin [1] as a more robust alternative to Dietterich's 5x2cv paired t-test procedure [2] (see paired_ttest_5x2cv). Dietterich's 5x2cv method was in turn designed to address shortcomings in other methods such as the resampled paired t test (see paired_ttest_resampled) and the k-fold cross-validated paired t test (see paired_ttest_kfold_cv).

To explain how this method works, let's consider two estimators (e.g., classifiers), A and B. Further, we have a labeled dataset D. In the common hold-out method, we typically split the dataset into 2 parts: a training and a test set. In the 5x2cv paired t test, we repeat the splitting (50% training and 50% test data) 5 times.

In each of the 5 iterations, we fit A and B to the training split and evaluate their performance ($p_A$ and $p_B$) on the test split. Then, we rotate the training and test sets (the training set becomes the test set and vice versa) and compute the performance again, which results in 2 performance difference measures:

    @@ -949,7 +957,7 @@

    Overview

    -

    Then, we estimate the estimate mean and variance of the differences:

    +

Then, we estimate the mean and variance of the differences:
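The formulas that the site build elided here can be reconstructed from the notebook source above and from Alpaydin's definition (my reconstruction, not rendered page content):

$$\overline{p} = \frac{p^{(1)} + p^{(2)}}{2}, \qquad s^2 = \big(p^{(1)} - \overline{p}\big)^2 + \big(p^{(2)} - \overline{p}\big)^2,$$

and, aggregated over the 5 repetitions, the combined statistic

$$f = \frac{\sum_{i=1}^{5} \sum_{j=1}^{2} \big(p_i^{(j)}\big)^2}{2 \sum_{i=1}^{5} s_i^2}$$

is approximately F-distributed with 10 and 5 degrees of freedom.

A minimal usage sketch (my own example; it assumes only the public mlxtend.evaluate API this page documents):

```python
# Compare two classifiers with the 5x2cv combined F test.
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from mlxtend.evaluate import combined_ftest_5x2cv
from mlxtend.data import iris_data

X, y = iris_data()
clf1 = LogisticRegression(solver='liblinear', multi_class='ovr')
clf2 = DecisionTreeClassifier(random_state=1)

# f: combined F statistic; p: p-value. Reject the null hypothesis of
# equal performance if p falls below the chosen significance level.
f, p = combined_ftest_5x2cv(estimator1=clf1, estimator2=clf2,
                            X=X, y=y, random_seed=1)
print('F statistic: %.3f, p value: %.3f' % (f, p))
```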

    @@ -1100,7 +1108,7 @@

    API

    diff --git a/docs/_site/site/user_guide/evaluate/confusion_matrix/index.html b/docs/_site/site/user_guide/evaluate/confusion_matrix/index.html index 9b97abefb..06c41d04b 100644 --- a/docs/_site/site/user_guide/evaluate/confusion_matrix/index.html +++ b/docs/_site/site/user_guide/evaluate/confusion_matrix/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,17 +930,17 @@
    @@ -1066,7 +1074,7 @@

    API

    diff --git a/docs/_site/site/user_guide/evaluate/feature_importance_permutation/index.html b/docs/_site/site/user_guide/evaluate/feature_importance_permutation/index.html index 3941ca75c..af10dcc43 100644 --- a/docs/_site/site/user_guide/evaluate/feature_importance_permutation/index.html +++ b/docs/_site/site/user_guide/evaluate/feature_importance_permutation/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,18 +930,18 @@
    @@ -1219,7 +1227,7 @@

    API

    diff --git a/docs/_site/site/user_guide/evaluate/ftest/index.html b/docs/_site/site/user_guide/evaluate/ftest/index.html index 7dbf96bed..e583e84ee 100644 --- a/docs/_site/site/user_guide/evaluate/ftest/index.html +++ b/docs/_site/site/user_guide/evaluate/ftest/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1083,7 +1091,7 @@

    API

    diff --git a/docs/_site/site/user_guide/evaluate/lift_score/index.html b/docs/_site/site/user_guide/evaluate/lift_score/index.html index 6f3d462df..0c8a67c52 100644 --- a/docs/_site/site/user_guide/evaluate/lift_score/index.html +++ b/docs/_site/site/user_guide/evaluate/lift_score/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,14 +930,14 @@
    @@ -1075,7 +1083,7 @@

    API

    diff --git a/docs/_site/site/user_guide/evaluate/mcnemar.ipynb b/docs/_site/site/user_guide/evaluate/mcnemar.ipynb index add0f30b9..92285de0a 100644 --- a/docs/_site/site/user_guide/evaluate/mcnemar.ipynb +++ b/docs/_site/site/user_guide/evaluate/mcnemar.ipynb @@ -416,7 +416,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.4" + "version": "3.6.7" }, "toc": { "nav_menu": {}, diff --git a/docs/_site/site/user_guide/evaluate/mcnemar/index.html b/docs/_site/site/user_guide/evaluate/mcnemar/index.html index 6bcaa8699..ac81e33b0 100644 --- a/docs/_site/site/user_guide/evaluate/mcnemar/index.html +++ b/docs/_site/site/user_guide/evaluate/mcnemar/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,20 +930,20 @@
    @@ -1095,7 +1103,7 @@

    API

    diff --git a/docs/_site/site/user_guide/evaluate/mcnemar_table/index.html b/docs/_site/site/user_guide/evaluate/mcnemar_table/index.html index 24ca99cdd..7e1ad87de 100644 --- a/docs/_site/site/user_guide/evaluate/mcnemar_table/index.html +++ b/docs/_site/site/user_guide/evaluate/mcnemar_table/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,13 +930,13 @@
    @@ -1027,7 +1035,7 @@

    API

    diff --git a/docs/_site/site/user_guide/evaluate/mcnemar_tables/index.html b/docs/_site/site/user_guide/evaluate/mcnemar_tables/index.html index b93c1a5ca..341075976 100644 --- a/docs/_site/site/user_guide/evaluate/mcnemar_tables/index.html +++ b/docs/_site/site/user_guide/evaluate/mcnemar_tables/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,15 +930,15 @@
    @@ -1081,7 +1089,7 @@

    API

    diff --git a/docs/_site/site/user_guide/evaluate/paired_ttest_5x2cv/index.html b/docs/_site/site/user_guide/evaluate/paired_ttest_5x2cv/index.html index 218b5eab6..07b16e677 100644 --- a/docs/_site/site/user_guide/evaluate/paired_ttest_5x2cv/index.html +++ b/docs/_site/site/user_guide/evaluate/paired_ttest_5x2cv/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1096,7 +1104,7 @@

    API

    diff --git a/docs/_site/site/user_guide/evaluate/paired_ttest_kfold_cv/index.html b/docs/_site/site/user_guide/evaluate/paired_ttest_kfold_cv/index.html index 868e58ddb..1012e851f 100644 --- a/docs/_site/site/user_guide/evaluate/paired_ttest_kfold_cv/index.html +++ b/docs/_site/site/user_guide/evaluate/paired_ttest_kfold_cv/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1096,7 +1104,7 @@

    API

    diff --git a/docs/_site/site/user_guide/evaluate/paired_ttest_resampled/index.html b/docs/_site/site/user_guide/evaluate/paired_ttest_resampled/index.html index 9bbc40ade..a82554c9b 100644 --- a/docs/_site/site/user_guide/evaluate/paired_ttest_resampled/index.html +++ b/docs/_site/site/user_guide/evaluate/paired_ttest_resampled/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1109,7 +1117,7 @@

    API

    diff --git a/docs/_site/site/user_guide/evaluate/permutation_test/index.html b/docs/_site/site/user_guide/evaluate/permutation_test/index.html index 6ace323ce..2f2d8428c 100644 --- a/docs/_site/site/user_guide/evaluate/permutation_test/index.html +++ b/docs/_site/site/user_guide/evaluate/permutation_test/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,14 +930,14 @@
    @@ -1067,7 +1075,7 @@

    API

    diff --git a/docs/_site/site/user_guide/evaluate/proportion_difference/index.html b/docs/_site/site/user_guide/evaluate/proportion_difference/index.html index d0f01f5db..a7443673a 100644 --- a/docs/_site/site/user_guide/evaluate/proportion_difference/index.html +++ b/docs/_site/site/user_guide/evaluate/proportion_difference/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,8 +903,10 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -912,12 +920,12 @@
    @@ -1081,7 +1089,7 @@

    API

    diff --git a/docs/_site/site/user_guide/evaluate/scoring/index.html b/docs/_site/site/user_guide/evaluate/scoring/index.html index 9b50e6d19..6f60d886d 100644 --- a/docs/_site/site/user_guide/evaluate/scoring/index.html +++ b/docs/_site/site/user_guide/evaluate/scoring/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,7 +930,7 @@
    @@ -1090,7 +1098,7 @@

    API

    diff --git a/docs/_site/site/user_guide/feature_extraction/LinearDiscriminantAnalysis/index.html b/docs/_site/site/user_guide/feature_extraction/LinearDiscriminantAnalysis/index.html index 0ecc7fa65..f4636602c 100644 --- a/docs/_site/site/user_guide/feature_extraction/LinearDiscriminantAnalysis/index.html +++ b/docs/_site/site/user_guide/feature_extraction/LinearDiscriminantAnalysis/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,16 +930,16 @@
    @@ -1110,7 +1118,7 @@

    Methods

    diff --git a/docs/_site/site/user_guide/feature_extraction/PrincipalComponentAnalysis/index.html b/docs/_site/site/user_guide/feature_extraction/PrincipalComponentAnalysis/index.html index 7e8a2f69f..93fab7a91 100644 --- a/docs/_site/site/user_guide/feature_extraction/PrincipalComponentAnalysis/index.html +++ b/docs/_site/site/user_guide/feature_extraction/PrincipalComponentAnalysis/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,21 +930,21 @@
    @@ -1195,7 +1203,7 @@

    Methods

    diff --git a/docs/_site/site/user_guide/feature_extraction/RBFKernelPCA/index.html b/docs/_site/site/user_guide/feature_extraction/RBFKernelPCA/index.html index 1e6a2a9b7..96bf8640e 100644 --- a/docs/_site/site/user_guide/feature_extraction/RBFKernelPCA/index.html +++ b/docs/_site/site/user_guide/feature_extraction/RBFKernelPCA/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -923,28 +931,28 @@
  • RBF Kernel Principal Component Analysis
  • Overview
  • Principal Component Analysis
  • - +
  • Nonlinear dimensionality reduction
  • - +
  • Kernel functions and the kernel trick
  • - +
  • Gaussian radial basis function (RBF) Kernel PCA
  • - +
  • RBF kernel PCA step-by-step
  • - +
  • Projecting new data
  • - +
  • References
  • - +
  • Example 1 - Half-moon shapes
  • Projecting new data
  • - +
  • Example 2 - Concentric circles
  • API
  • Methods
  • - +
    @@ -1268,7 +1276,7 @@

    Methods

    diff --git a/docs/_site/site/user_guide/feature_selection/ColumnSelector.ipynb b/docs/_site/site/user_guide/feature_selection/ColumnSelector.ipynb index 483a7c2dd..482269f43 100644 --- a/docs/_site/site/user_guide/feature_selection/ColumnSelector.ipynb +++ b/docs/_site/site/user_guide/feature_selection/ColumnSelector.ipynb @@ -2,10 +2,8 @@ "cells": [ { "cell_type": "code", - "execution_count": 14, - "metadata": { - "collapsed": true - }, + "execution_count": 1, + "metadata": {}, "outputs": [], "source": [ "%matplotlib inline" @@ -71,10 +69,8 @@ }, { "cell_type": "code", - "execution_count": 15, - "metadata": { - "collapsed": true - }, + "execution_count": 2, + "metadata": {}, "outputs": [], "source": [ "from sklearn.datasets import load_iris\n", @@ -93,7 +89,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 3, "metadata": {}, "outputs": [ { @@ -102,7 +98,7 @@ "(150, 2)" ] }, - "execution_count": 16, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } @@ -124,7 +120,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -203,7 +199,7 @@ "4 5.0 3.6 1.4 0.2" ] }, - "execution_count": 17, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } @@ -217,7 +213,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -226,7 +222,7 @@ "(150, 2)" ] }, - "execution_count": 18, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -245,16 +241,16 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "0.83999999999999997" + "0.84" ] }, - "execution_count": 4, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -303,10 +299,8 @@ }, { "cell_type": "code", - "execution_count": 5, - "metadata": { - "collapsed": true - }, + "execution_count": 7, + "metadata": {}, "outputs": [], "source": [ "from sklearn.datasets import load_iris\n", @@ -325,7 +319,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -354,7 +348,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 9, "metadata": {}, "outputs": [ { @@ -386,6 +380,68 @@ "print('Best performance:', grid.best_score_)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example 3 -- Scaling of a Subset of Features in a scikit-learn Pipeline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The following example illustrates how we could use the `ColumnSelector` in tandem with scikit-learn's `FeatureUnion` to only scale certain features (in this toy example: the first and second feature only) in a datasets in a `Pipeline`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Pipeline(memory=None,\n", + " steps=[('feats', FeatureUnion(n_jobs=None,\n", + " transformer_list=[('col_1-2', Pipeline(memory=None,\n", + " steps=[('columnselector', ColumnSelector(cols=(0, 1), drop_axis=False)), ('minmaxscaler', MinMaxScaler(copy=True, feature_range=(0, 1)))])), ('col_3-4', ColumnSelector(cols=(2, 3), drop_axis=Fa...ki',\n", + " metric_params=None, n_jobs=None, n_neighbors=5, p=2,\n", + " weights='uniform'))])" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from mlxtend.feature_selection import ColumnSelector\n", + "from sklearn.pipeline import make_pipeline\n", + "from sklearn.pipeline import Pipeline\n", + "from sklearn.pipeline import FeatureUnion\n", + "from sklearn.preprocessing import MinMaxScaler\n", + "from sklearn.neighbors import KNeighborsClassifier\n", + "from mlxtend.data import iris_data\n", + "\n", + "\n", + "X, y = iris_data()\n", + "\n", + "scale_pipe = make_pipeline(ColumnSelector(cols=(0, 1)),\n", + " MinMaxScaler())\n", + "\n", + "pipeline = Pipeline([\n", + " ('feats', FeatureUnion([\n", + " ('col_1-2', scale_pipe),\n", + " ('col_3-4', ColumnSelector(cols=(2, 3)))\n", + " ])),\n", + " ('clf', KNeighborsClassifier())\n", + "])\n", + "\n", + "\n", + "pipeline.fit(X, y)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -395,7 +451,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -540,15 +596,6 @@ " s = f.read() + '

    '\n", "print(s)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [] } ], "metadata": { @@ -568,7 +615,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.4" + "version": "3.6.5" }, "toc": { "nav_menu": {}, diff --git a/docs/_site/site/user_guide/feature_selection/ColumnSelector/index.html b/docs/_site/site/user_guide/feature_selection/ColumnSelector/index.html index 593576bd5..439ecaa47 100644 --- a/docs/_site/site/user_guide/feature_selection/ColumnSelector/index.html +++ b/docs/_site/site/user_guide/feature_selection/ColumnSelector/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,15 +930,17 @@
    @@ -1054,7 +1064,7 @@

    Example 1 - Fitting pipe.score(X, y) -
    0.83999999999999997
    +
    0.84
     

    Example 2 - Feature Selection via GridSearch

    Example 1 showed a simple useage example of the ColumnSelector; however, selecting columns from a dataset is trivial and does not require a specific transformer class since we could have achieved the same results via

    @@ -1105,6 +1115,41 @@

Example 2 - Feature Selection
    Best parameters: {'columnselector__cols': (2, 3), 'kneighborsclassifier__n_neighbors': 1}
     Best performance: 0.98
     
    +

    Example 3 -- Scaling of a Subset of Features in a scikit-learn Pipeline

    +

The following example illustrates how we could use the ColumnSelector in tandem with scikit-learn's FeatureUnion to only scale certain features (in this toy example: the first and second feature only) in a dataset in a Pipeline.

    +
    from mlxtend.feature_selection import ColumnSelector
    +from sklearn.pipeline import make_pipeline
    +from sklearn.pipeline import Pipeline
    +from sklearn.pipeline import FeatureUnion
    +from sklearn.preprocessing import MinMaxScaler
    +from sklearn.neighbors import KNeighborsClassifier
    +from mlxtend.data import iris_data
    +
    +
    +X, y = iris_data()
    +
    +scale_pipe = make_pipeline(ColumnSelector(cols=(0, 1)),
    +                           MinMaxScaler())
    +
    +pipeline = Pipeline([
    +    ('feats', FeatureUnion([
    +        ('col_1-2', scale_pipe),
    +        ('col_3-4', ColumnSelector(cols=(2, 3)))
    +    ])),
    +    ('clf', KNeighborsClassifier())
    +])
    +
    +
    +pipeline.fit(X, y)
    +
    + +
    Pipeline(memory=None,
    +     steps=[('feats', FeatureUnion(n_jobs=None,
    +       transformer_list=[('col_1-2', Pipeline(memory=None,
    +     steps=[('columnselector', ColumnSelector(cols=(0, 1), drop_axis=False)), ('minmaxscaler', MinMaxScaler(copy=True, feature_range=(0, 1)))])), ('col_3-4', ColumnSelector(cols=(2, 3), drop_axis=Fa...ki',
    +           metric_params=None, n_jobs=None, n_neighbors=5, p=2,
    +           weights='uniform'))])
    +
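In this pipeline, FeatureUnion concatenates the min-max-scaled first and second columns (from scale_pipe) with the untouched third and fourth columns (from the second ColumnSelector), so the KNeighborsClassifier is fit on all four features while only the first two are rescaled.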

    API

    ColumnSelector(cols=None, drop_axis=False)

    Object for selecting specific columns from a data set.

    @@ -1228,7 +1273,7 @@

    Methods

    diff --git a/docs/_site/site/user_guide/feature_selection/ExhaustiveFeatureSelector/index.html b/docs/_site/site/user_guide/feature_selection/ExhaustiveFeatureSelector/index.html index ef0caec83..cd9e41e5f 100644 --- a/docs/_site/site/user_guide/feature_selection/ExhaustiveFeatureSelector/index.html +++ b/docs/_site/site/user_guide/feature_selection/ExhaustiveFeatureSelector/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,23 +930,23 @@
    @@ -1801,7 +1809,7 @@

    Methods

    diff --git a/docs/_site/site/user_guide/feature_selection/SequentialFeatureSelector/index.html b/docs/_site/site/user_guide/feature_selection/SequentialFeatureSelector/index.html index 450353d50..0745c0e39 100644 --- a/docs/_site/site/user_guide/feature_selection/SequentialFeatureSelector/index.html +++ b/docs/_site/site/user_guide/feature_selection/SequentialFeatureSelector/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,41 +930,41 @@
    @@ -2278,7 +2286,7 @@

    Methods

    diff --git a/docs/_site/site/user_guide/file_io/find_filegroups/index.html b/docs/_site/site/user_guide/file_io/find_filegroups/index.html index b506fca82..a513d5e88 100644 --- a/docs/_site/site/user_guide/file_io/find_filegroups/index.html +++ b/docs/_site/site/user_guide/file_io/find_filegroups/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1052,7 +1060,7 @@

    API

    diff --git a/docs/_site/site/user_guide/file_io/find_files/index.html b/docs/_site/site/user_guide/file_io/find_files/index.html index 9a0596b3e..dd8ee723a 100644 --- a/docs/_site/site/user_guide/file_io/find_files/index.html +++ b/docs/_site/site/user_guide/file_io/find_files/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1017,7 +1025,7 @@

    API

    diff --git a/docs/_site/site/user_guide/frequent_patterns/apriori/index.html b/docs/_site/site/user_guide/frequent_patterns/apriori/index.html index dfa7953ab..320adad44 100644 --- a/docs/_site/site/user_guide/frequent_patterns/apriori/index.html +++ b/docs/_site/site/user_guide/frequent_patterns/apriori/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -1686,7 +1694,7 @@

    API

    diff --git a/docs/_site/site/user_guide/frequent_patterns/association_rules/index.html b/docs/_site/site/user_guide/frequent_patterns/association_rules/index.html index 53debc727..6e978d781 100644 --- a/docs/_site/site/user_guide/frequent_patterns/association_rules/index.html +++ b/docs/_site/site/user_guide/frequent_patterns/association_rules/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -923,12 +931,12 @@
  • Association Rules Generation from Frequent Itemsets
  • Overview
  • Metrics
  • - +
  • References
  • Example 1 -- Generating Association Rules from Frequent Itemsets
  • Example 2 -- Rule Generation and Selection Criteria
  • @@ -2191,7 +2199,7 @@

    API

    diff --git a/docs/_site/site/user_guide/general_concepts/activation-functions/index.html b/docs/_site/site/user_guide/general_concepts/activation-functions/index.html index 6423f44b7..51133ab99 100644 --- a/docs/_site/site/user_guide/general_concepts/activation-functions/index.html +++ b/docs/_site/site/user_guide/general_concepts/activation-functions/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -934,7 +942,7 @@

    Activation Function diff --git a/docs/_site/site/user_guide/general_concepts/gradient-optimization.ipynb b/docs/_site/site/user_guide/general_concepts/gradient-optimization.ipynb index 6fc6f5ef6..e879f0ebf 100644 --- a/docs/_site/site/user_guide/general_concepts/gradient-optimization.ipynb +++ b/docs/_site/site/user_guide/general_concepts/gradient-optimization.ipynb @@ -22,11 +22,11 @@ "\n", "Compatible cost functions $J(\\cdot)$\n", "\n", - "- Sum of squared errors (SSE) [ [mlxtend.regressor.LinearRegression](./regressor/linear_regression.html), [mlxtend.classfier.Adaline](./classifier/adaline.html) ]:\n", + "- Sum of squared errors (SSE) [ [mlxtend.regressor.LinearRegression](../regressor/LinearRegression.md), [mlxtend.classfier.Adaline](../classifier/Adaline.md) ]:\n", "$$J(\\mathbf{w}) = \\frac{1}{2} \\sum_i (\\text{target}^{(i)} - \\text{output}^{(i)})^2$$\n", "\n", "\n", - "- Logistic Cost (cross-entropy) [ [mlxtend.classfier.LogisticRegression](./classifier/logisitic_regression.html) ]:\n", + "- Logistic Cost (cross-entropy) [ [mlxtend.classfier.LogisticRegression](../classifier/LogisticRegression.md) ]:\n", "...\n", "\n", "\n", @@ -192,7 +192,7 @@ "metadata": { "anaconda-cloud": {}, "kernelspec": { - "display_name": "Python [default]", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -206,9 +206,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.2" + "version": "3.6.7" } }, "nbformat": 4, - "nbformat_minor": 0 + "nbformat_minor": 1 } diff --git a/docs/_site/site/user_guide/general_concepts/gradient-optimization/index.html b/docs/_site/site/user_guide/general_concepts/gradient-optimization/index.html index 8a6607b8e..12f5d5d35 100644 --- a/docs/_site/site/user_guide/general_concepts/gradient-optimization/index.html +++ b/docs/_site/site/user_guide/general_concepts/gradient-optimization/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,19 +930,19 @@
    @@ -946,12 +954,12 @@

    Gradient Descent (GD) Optimization
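As a worked illustration (my own minimal sketch, not mlxtend library code), one full-batch gradient-descent loop for the SSE cost $J(\mathbf{w}) = \frac{1}{2} \sum_i (\text{target}^{(i)} - \text{output}^{(i)})^2$ listed in the notebook diff above:

```python
# Minimal sketch: full-batch gradient descent for the SSE cost of a
# bias-free linear model with a single feature.
import numpy as np

X = np.array([1.0, 2.0, 3.0])      # single feature
y = 2.0 * X                        # targets generated by w_true = 2
w, eta = 0.0, 0.01                 # initial weight and learning rate

for _ in range(100):
    output = w * X                 # linear activation
    errors = y - output            # target - output
    w += eta * (errors * X).sum()  # dJ/dw = -sum(errors * x)

print(round(w, 3))                 # -> converges to 2.0
```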

    @@ -1070,7 +1078,7 @@

    References

    diff --git a/docs/_site/site/user_guide/general_concepts/linear-gradient-derivative/index.html b/docs/_site/site/user_guide/general_concepts/linear-gradient-derivative/index.html index badcffb7b..380f92681 100644 --- a/docs/_site/site/user_guide/general_concepts/linear-gradient-derivative/index.html +++ b/docs/_site/site/user_guide/general_concepts/linear-gradient-derivative/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -1025,7 +1033,7 @@

    De diff --git a/docs/_site/site/user_guide/general_concepts/regularization-linear/index.html b/docs/_site/site/user_guide/general_concepts/regularization-linear/index.html index 0efd8a042..17d9a7c26 100644 --- a/docs/_site/site/user_guide/general_concepts/regularization-linear/index.html +++ b/docs/_site/site/user_guide/general_concepts/regularization-linear/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -969,7 +977,7 @@

    References

    diff --git a/docs/_site/site/user_guide/image/extract_face_landmarks/index.html b/docs/_site/site/user_guide/image/extract_face_landmarks/index.html index c2b62982d..9b6b73378 100644 --- a/docs/_site/site/user_guide/image/extract_face_landmarks/index.html +++ b/docs/_site/site/user_guide/image/extract_face_landmarks/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1041,7 +1049,7 @@

    API

    diff --git a/docs/_site/site/user_guide/math/num_combinations/index.html b/docs/_site/site/user_guide/math/num_combinations/index.html index 0f5945cbc..1376c6862 100644 --- a/docs/_site/site/user_guide/math/num_combinations/index.html +++ b/docs/_site/site/user_guide/math/num_combinations/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,14 +930,14 @@
    @@ -1048,7 +1056,7 @@

    API

    diff --git a/docs/_site/site/user_guide/math/num_permutations/index.html b/docs/_site/site/user_guide/math/num_permutations/index.html index c76fd819c..6645f2cb2 100644 --- a/docs/_site/site/user_guide/math/num_permutations/index.html +++ b/docs/_site/site/user_guide/math/num_permutations/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,14 +930,14 @@
    @@ -1038,7 +1046,7 @@

    API

    diff --git a/docs/_site/site/user_guide/math/vectorspace_dimensionality/index.html b/docs/_site/site/user_guide/math/vectorspace_dimensionality/index.html index 95d3bd53f..2d8750a64 100644 --- a/docs/_site/site/user_guide/math/vectorspace_dimensionality/index.html +++ b/docs/_site/site/user_guide/math/vectorspace_dimensionality/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1011,7 +1019,7 @@

    API

    diff --git a/docs/_site/site/user_guide/math/vectorspace_orthonormalization/index.html b/docs/_site/site/user_guide/math/vectorspace_orthonormalization/index.html index 446b42ca0..433d35972 100644 --- a/docs/_site/site/user_guide/math/vectorspace_orthonormalization/index.html +++ b/docs/_site/site/user_guide/math/vectorspace_orthonormalization/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1025,7 +1033,7 @@

    API

    diff --git a/docs/_site/site/user_guide/plotting/category_scatter/index.html b/docs/_site/site/user_guide/plotting/category_scatter/index.html index accde3e85..5bdf3c098 100644 --- a/docs/_site/site/user_guide/plotting/category_scatter/index.html +++ b/docs/_site/site/user_guide/plotting/category_scatter/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,14 +930,14 @@
    @@ -1106,7 +1114,7 @@

    API

    diff --git a/docs/_site/site/user_guide/plotting/checkerboard_plot/index.html b/docs/_site/site/user_guide/plotting/checkerboard_plot/index.html index 23d22ac8d..85c59f0c3 100644 --- a/docs/_site/site/user_guide/plotting/checkerboard_plot/index.html +++ b/docs/_site/site/user_guide/plotting/checkerboard_plot/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,14 +930,14 @@
    @@ -1030,7 +1038,7 @@

    API

    diff --git a/docs/_site/site/user_guide/plotting/ecdf/index.html b/docs/_site/site/user_guide/plotting/ecdf/index.html index 4afac3615..452f309c7 100644 --- a/docs/_site/site/user_guide/plotting/ecdf/index.html +++ b/docs/_site/site/user_guide/plotting/ecdf/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,16 +930,16 @@
    @@ -1071,7 +1079,7 @@

    API

    diff --git a/docs/_site/site/user_guide/plotting/enrichment_plot/index.html b/docs/_site/site/user_guide/plotting/enrichment_plot/index.html index 6456c6b8a..8f0d84e40 100644 --- a/docs/_site/site/user_guide/plotting/enrichment_plot/index.html +++ b/docs/_site/site/user_guide/plotting/enrichment_plot/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1075,7 +1083,7 @@

    API

    diff --git a/docs/_site/site/user_guide/plotting/plot_confusion_matrix/index.html b/docs/_site/site/user_guide/plotting/plot_confusion_matrix/index.html index e43818f75..439406ecd 100644 --- a/docs/_site/site/user_guide/plotting/plot_confusion_matrix/index.html +++ b/docs/_site/site/user_guide/plotting/plot_confusion_matrix/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,17 +930,17 @@
    @@ -1048,7 +1056,7 @@

    API

    diff --git a/docs/_site/site/user_guide/plotting/plot_decision_regions.ipynb b/docs/_site/site/user_guide/plotting/plot_decision_regions.ipynb index 17bf3dfee..204925dc9 100644 --- a/docs/_site/site/user_guide/plotting/plot_decision_regions.ipynb +++ b/docs/_site/site/user_guide/plotting/plot_decision_regions.ipynb @@ -164,10 +164,12 @@ "import numpy as np\n", "\n", "# Initializing Classifiers\n", - "clf1 = LogisticRegression(random_state=1)\n", - "clf2 = RandomForestClassifier(random_state=1)\n", + "clf1 = LogisticRegression(random_state=1,\n", + " solver='newton-cg',\n", + " multi_class='multinomial')\n", + "clf2 = RandomForestClassifier(random_state=1, n_estimators=100)\n", "clf3 = GaussianNB()\n", - "clf4 = SVC()\n", + "clf4 = SVC(gamma='auto')\n", "\n", "# Loading some example data\n", "iris = datasets.load_iris()\n", @@ -180,23 +182,9 @@ "execution_count": 5, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n", - " FutureWarning)\n", - "/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:459: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n", - " \"this warning.\", FutureWarning)\n", - "/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/ensemble/forest.py:248: FutureWarning: The default value of n_estimators will change from 10 in version 0.20 to 100 in 0.22.\n", - " \"10 in version 0.20 to 100 in 0.22.\", FutureWarning)\n", - "/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. 
Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n", - " \"avoid this warning.\", FutureWarning)\n" - ] - }, { "data": { - "image/png": "[base64-encoded PNG data of the previous plot omitted]", + "image/png": "[base64-encoded PNG data of the regenerated plot omitted]",
KEYMQmt1RaEU8SFoMFFfJC1iNHo3F4HLTSjFZSByqEOELySwgBI9hpiieJJGsSGDRRmDmgweKxQBvYaIp0a4QQUUTySwgBI3h7zmKwRG/ggLddBu1tpxBCdCH5JYSAEew0KVT0Bk4nRVQOvQshIkvySwgBY6wQfP26DVx19rVcceY1/OOh5yPdHCGEGBDJMCEia8x0mtxuN3/5+f38/snf8NTax3j3X+9Tsrc00s0SQoiQSIYJEXkjVgg+EN+99DYaG9t6bE9JsfK3F/80qHPu/Hw34yblkTcxD4CzL1nIR2s+Jn/qpCG1VQghugpHfoFkmBDRICo7TY2NbRR99y89tu/5248Hfc7aqlqy8rL8rzNzM9n5+a5Bn08IIYIJR36BZJgQ0WDM3J7TOsjcJVIzKYQYJSTDhIi8MdNpyszJpLqi2v+65nANGVnpEWyREEKETjJMiMgbM52m6cdM41BJOYfLDtPh7OC9V9exYNGpkW6WEEKERDJMiMiLypqmcDCZjPz4l7fw39fcgcft4SvLLqCgKD/SzRJCiJBIhgkReVHZaUpJsQYtmkxJsQ7pvCefdRInn3XSkM4hhBB9CVd+gWSYEJEWlZ2moTyWK4QQkST5JcTRq9+aJqXUNKXU512+mpVSQ3t2VgghRoDklxBiOPU70qS13g0cC6CUMgLlwCthbpcQQgyZ5JcQYjgN9Om5c4D9WmuZu18IMdpIfgkhhmSgNU2XA0FXiVRK3QjcCHDDXddyzmULh9YyIY4in3/0BW+tXENNRS2ZeRlcsOw8jj1tbqSbNdZIfgkxCJJfR4TcaVJKxQBfBe4M9r7WejmwHGDF9qeCTF0rxNj0+Udf8M8nVpK/JJf8/Bk0ldj45xMrAcZs8Iw0yS8hBkfyK9BAbs9dCGzRWleFqzHhds9P/sDi4y/l2vNuiHRTxBjy1so15C/JJXVKEgajgdQpSeQvyeWtlWsi3bSxRPJLiEGQ/Ao0kE7TN+llaHu0uPDS8/nDU7+NdDPEGFNTUUtyfkLAtuT8BGoqaiPUojFJ8kuIQZD8ChRSp0kpZQUWAS+HtzmBGuubuOs7P6OpoWlYznfMSXNJTE4clnMJEarMvAyaSmwB25pKbGTmZUSoRWOL5JcQgyf5FSikmiatdRsw4itDrn3hDVxle1iz8g0uu+mbI/3xQgyLC5adx6O/ewyPxU1HqwtzvAmDw8h3fvrtSDdtTJD8EmLwJL8CRe2CvY31TWx8Yy33Lc1l4xtrh+1qTYiRdmB7MR26g9xz0yi6cTy556bRoTs4sL040k0TYSL5JY4Wkl+BonIZFfBepV1SqJiaHcslhW1ytSZGje6P5x7YWczk63JJne69tZI8CWKSzKx95l2+ftPiCLdWhIPklxjNumZYfU09RTdOIG2G5BdE6UhT51XalccnAXDl8UlytSZGhc7Hc1MXxTH/rhmkLoqjvc1BYkHgYq3JhQm0t7ZHqJUinCS/xGjWPcM8bg/xE2PwuD3+fcZyfkVlp6nzKi09wTsQlp5g4pJCxZqVbwzpvL+45dd87+s/5OCBMi49+XJe/+ebw9FcIfyCPZ5rjjPSuK9bIeU+G7HxsRFqpQgnyS8xmnXPMLPVhO1gO27XkU7TWM6vqLw9t/WTLbxf0c7zX1QEbE+r3TKkIe677v+foTZNiD7VVNSSnz8jYNvE03MpXnEY45UGkgsTaNpno3jFYc5fcn6EWinCSfJLjGbdM8ybX5XkX5ZN2vTkMZ9fUdlp+tVTf4h0E4QYlM7Hc1OnJPm35czLpGFTG6XP1NDeWkZsfCznLzl/TNYDjAWSX2I0655h05dOxt7gYN9jFaArxnx+RWWnSYjR6oJl53mXGFjinQCuqcRGyarDXPvTq/pdckDWdxJCRFqwDKPJwI9+/YM+82is5Jd0moQYRp0h8dbKNeypKCMzL4NvXLcspA6TrO8khIi0wWTYWMov6TQJMcyOPW3ugIOia/El4P3nEu/2oy10hBDRbaAZNpbySzpNQvRhpIacgxWQJ+cnsKeibNg/SwgxNkh+DT/pNAnRi5Eccg5WQD6W13cSQgyN5Fd4ROU8TeFQXVHNjy6/javPuZ5vLfo2Lz4+omt3ilEo2JxL+UtyeWvlmmH/rAuWnUfJqsM07G/G4/bQsL+ZklWHuWDZecP+WWJ0kgwTAyH5FR5jZqTJaDLy/f/9LkWzp9Jma+M7l9zMCacfT/7USZFumohSIznkPNgCcjF2SIaJgZD8Co+o7TR9um4DLz33EofLKsmdkMPSK5Zy8sITB32+9Kx00rO8C51bE6xMmjKRmspaCRzRq5Eech5MAbmITsOdXyAZJgZG8is8orLT9Om6DTz6yHLyF+cxsWA2jcUtPPrIcoAhBw/A4bJK9u7Yx8xjpw/5XOLo0b1osmjWVDau2hgwX8nOJ4tJSEjgtkvvGFBh5ViZw0SEP79AMkwE1zVn4uJjqXyykhnXFgTMGTf/xPnc88M/DiiLJL+OiMpO00vPvUT+4jzSCpMBvP9c7N0+1NBpa7Xz85t/wS0//x7xifHD0VxxFAhWNLlx1UbmnzifPWv3sqeiDIvFgjHWwIRLM/0hFEph5Viaw0SEN79AMkwEFyxn9qxoo3RFNQ6H95bZ/BPns3HDxgFlkeRXoJA6TUqpFODvwGxAA9drrf8TrkYdLqtkYsHsgG0pBYnsLisd0nldHS5+/t27OXfJOZxxwelDOpcYmGi/Unlr5RoSZ8axe1UxbTXtWDNjyZiZyp7te7njr/8NwD0//COpi7IGPBfJWJrDJBodLfkFkmGRNCozbF4qhlILd/z1F4A3wwaaRZJfgUIdaboPeEtrfalSKgawhrFN5E7IobG4xX+lBtBY3ELuhJxBn1Nrze9++kcmFU7iGzdcOhzNFCEaDVcqZXvKMTRoJi7OImFSHLZSOwdXV+OpUf59BltYOZbmMIlSoz6/QDIsksZyhkl+Bep3ygGlVBJwBvAYgNbaqbVuDGejll6xlJLVFdTva8Lj9lC/r4mS1RUsvWLpoM+5bdOXrHn5Hbb85zO+feFNfPvCm/j0/fXD2GrRm5F89HWwPMrNuAvTSZpsxWBUJE22Mu7CdDzK7d+ns7Cyq1AKKwd7nBi6oyW/QDIsksZyhkl+BQplpGkyUAM8oZQ6BtgM/Ehr3dp1J6XUjcCNADfcdS3nXLZw0I3qvO//0nMvsbuslNwJOXznphuHVA8wd/4cPih5Z9DHi8EL95XKYIfNux7naHVgTjDiandjtBhxO9zEJJowGY/8ifS2GO83rlvW5+cM9jgxLI6K/ALJsEgKZ4YNR35l5mWg3ZqYRNOwZ5jkV6BQOk0mYB5wi9Z6vVLqPuAO4P+67qS1Xg4sB1ix/Sk91IadvPDEYXvSRERWOB99HeyweffjPvntZ7SWOzAYjWiPE5PZjLHNwrjJef5jBjsXyViawyQKSX6JIQtXhg1XfjWV2Ni/y42nQeE2axwd9mHLMMmvQKF0mg4Bh7TWnePAL+INHSFCMpArlVCuurru09baxvRr84dcnF140SS2PbcXU5wBj1NjS
TBjcJi49r+uCThusHORjJU5TKKQ5JcYslAzLFL5lToliYLzx7HrmVKsmRYcto5hzTDJryP67TRprSuVUmVKqWla693AOcCO8DdNHC1CvVIJ5aqr+z7v/XQ9bquDNpsda0IcMPjibHO8kawzk0kcZ8VR46ZuXcuw/PwiciS/xHAIJcMinV/xWXEoM+SclY4l0ygZFiahPj13C/AP35MnB4DrBvpBGu192Ff1u2vkaF87xbAL5UollEdbu++TkGvF2eKi2dzsD52BFGd3nqfkvXImLsnCmhZHzsRsKIL0nOYx+1jtUUbySwxZfxkWyfwC2P/mQQq+lkfhgnzvBsmwsAip06S1/hw4YSgf5PA4sHgsYNDRGTwa8CgcHkd0tm8MCKXYsqaiFseWWDbev40OuxujWdHybCsTL8kia1xmyLPedh9utx1uY3xiOklpSb1+thidJL/ESIhkfjWV2Ggpt3Ps7Kw+P18M3YjNCN5KM7SBxWBBReFftUbj8Di87RQREUqxpdvh4dDGKiZ/K5ekKVaa97dx4B+HKX6+ksZ3HSHPett9uN2oTRjbLP6rvWCfLcYuyS/Rn0jmV2ZeBuPyc+modcGR6cEkw8JgxDpNWmlsNGGL5tHj6MvCUSGU4seXH1nN2lXv0t7aTmx8LIuWnAMQsG32cbMoWVXWZ7FlW3srEy/PIibFhL3aQUyKiUlLsylbUcufXrwHCH3W267D7Z21BrFWizxWK3qQ/Dq6DSbDxo3Lo7y8Iiryq/NnkKkBwi8q154To0coxY8vP7Kat19/m4Krc0kuTKBpn403n30Lt8ND0bfH+7d9sWIrc2cfQ8Pall6LLZ12J7GZZmKSzRhiFB6nRrs1TrvTv89g5lSRx2qFGJsGk2G1XzRS8nIJWaemMuH8gojnV9e2SoaFl3SaxJCEUvy4dtW7FFydS+r0RO8+0xMZd1E6lR80BGzjcvjyme08/OZfe/08ZVR0tLix5nknszfGel8r45HL7MHOqSKP1Qox9gwmw2IzzBRcmcPhtfUYTCoq8gskw0aCdJrEkIRyVdTe2k5yYULAPnHZMTjqnGy/v5j2WiexGTFkn56G3WbvswAyxhTDoVdrMJgMJOTHYitp59CrNXicmtsuvYPMvAyKZk1l46qNMkwthOjXYDJMa3DbPdhK7Gy+a7fk1xginSYxJKFcFcXGx9K0z+a/SgOo3diMKc5I3lcySJgYi+1gO6UvVuFxeUhdFNfrMHnBrHwcOTYOvVpDe70TS6qZhAIrJoOJ+Xd6j9m4aiPzT5zPnrV7ZZhaCNGnwWRYy75WajY1U/DNHNKPTZL8GkP6XbBXiL5csOw8SlYdpmF/Mx63h4b9zZSsOkzRrKnc88M/ctuld2CNt7L38UM07GrB49I07GqhZn0T4y7MwJprQRkU1lwL2WekYEm19Lko5gXLzqN9v4s535jGub87hXELs2gtbqfwokkBx2z6cEsEfytCiNEiWIbtfLIYW5ON2y69g3t++EdmHzeL4hWH/RlWs6GZnDNSSSqMl/waY2SkSQxJsOLDYI/Mbn/ayb5HD+N2u4iNjyU2JpacGVnY6lrweDQGg8KaHYvBFPgIUPdh8u6f11TfxJzrppJz3JGrwvZGB7VNNUy6PGtA6zkJIcae7plisVgwxhqYcGlml9tjZcydfQxfPrOd9tYyPB5NysQkXLYOWptdkl9jiHSaxJB1Lz4M9sjsrGum0LDWzh1//W//PqrVyMSpE/3H7fu4BEuCOeDcwQogu37ePT/8I7EploD39795kPzF/T+yK4QQ0DNTUhdl9ciPhrUt/iLve374R+KII3XqkVt6kl9jg9yeE8OupqKW5PzAwu/k/ARqKmr9ry9Ydh7bn97Pzvf3ULyjhJ3v76H8jRoMDlOPW30XLDuv188KNrTeUm4nJ8jMuF0/XwghgpH8En2RkSYx7EIprDywvZjWhlY6PnTianNjshpxtnQw88RZfc5z0p3MjDuyWptbOVxSCYDWmuINe9DODi5/6FsRbpkQw0PyS/RFOk1i2AVbF6n7I7NrV73L1OvHBzxR17Crhc8e3UrhnMl9nj/Y7L2dt/0635eZcQem/EAFLqcLgNqD1VTvKUcphUdraG4lNdEKgHK7OWf6eJSvdOOGs+eQm5Hc22mFGHUkv0RfpNMkhl0oM9MGm7vJ3e5Bx/b9yG4os/fKzLherc2tNNV51yJzu9xsf28r2u0GwN7YSo6vI+T2eChKi2dCSjwA85OsnH31WZFptBARJvkl+iKdJhEW/c1MG2zupkNv1zDx4sw+CyBDmb03lM8fzVoabbS3tQNQtr2UuoPVAHQ4OjDa2om3egtLTS4Xx03M9B93/YXzSEuy+l8rJYuVCRGM5JfojXSaRIAHf/YImz7ejNvlwWgyMGXqZExWU48ZbpsbWnji549y/f+7kcSUhH7P231IevZxs/hixVa4HP/ac20V7aRPSaHyYBWujg5MZjMJGYkBBZChrssUygKc0cTtctPSaPO90uz6eDvtLXYAWupbSAQMBu9zG/EeN5OzUgA4LzeVBZfM958nzhIzks0WIqp0z68TFhzPgq+c0iMLJs8qGFB+QWCmdM49N/X68ZJfY4x0moTfgz97hM+2fsbUG8eRNMVK+dpaSv5TQuHS8cz/TuBQ8uE9BzFUlPDRy+u48PqL+zxvsCHp7vOexMbHkpScSENpM2lzkoixxOF2uCn/ooK4+Fj/uUIp0gxlCHyktLc58PhuiVWXVXPoyxLAuwxDY2kVSfFx3v1a2pidl+ZfqP7yaeOYNm8KACajgcQuvwMhRE/d86t5fxsbH9/Ejp3bmXv9NPLzZ1C/t5nHfvcEWclpOA6X8/Tdj3P6kjOZu/CYPs8dLFO6zz13NOaX6CmkTpNSqgRoAdyAS2t9QjgbJUZG96uZXZ/voeimcSRP89a22A7YKbgyB4MV/2y1LIF//u1Fmg7XkGjW7PzH66TkZXHKBSf2+jlvrVxD4sw4dq8qpq2mHWtmLBkzU2kpbQlY3PJnV95F5fsNxKZbSJgUR1uFg8r3G0g2pvr36SzS9FzixmV2YOqI5eCrVQFFkqEOgQ+Wx+PB7fJ2hNwuD1vf/wzt8gDQWFFHrFt73/N4sHZ0kOWrFUqPj+WaE6b6i6gzzj0Gs8k45PaIvkl+Hb26ZlhtVS1FN43351fytHjiMmLIPD3JnwWmGAPpM2Mp/riU9DgDOz7fQYzD3W+nKViG5ZycjqHU4i/iHi35JYZmICNNZ2mtZaKIo0SwqxnPZjeJBXH+fdprO0iaYqWtwuHfZs4wcfjgYY6/No8p0+LZv7uVFY+vIC4httc/6LI95RgaNBMXZ5EwKQ5bqZ2Dq6vx1ATW1DgcDooW51P6ZoU/mIouyufg6mr/Pp2f8Y/7nqOxso6UnHSu/NEVAZ8d6hB4Xw7uKqWquAoAl9NF/b5yrLHeWiF7k42Jqd4hfa3h8mMLyPK9TjhmEpmpicFPKiJJ8iuKaa0HfEz3DHvz5n+TMCkO7QHlm4HQ1eYhNvvILet2uwNXChjjjSz6+RT27mplwzO7
mf/RF312SELJsGjKLxE+cntujAp2NWO0GGne10bKTG8HIDbDTPP+NszWI/+ZHP6yirisGAqmx2MwKAqmx2NzwuvPvdlr6HiUmwkXZpI02VuEnDTZyrgL0yl9piZgv8y8DGJTLJx825Grvob9zT3mJ5k8q4Bsk+LF707i+6+1MmX25B7n6W0IfO+WvdgaWwCw1bXQfrgBg9GAs91JgttNXKx3Rt/JmclcMnOC//ii02diNMpcsEIMt7NnT+L+Vz6m3HCkA3KwzcnX77i8z+O6Z5gx1oDtoJ2kyVaUr/7PZDXQXuWEWd5jOsxOWkrsKI9my2s1eDQkz7H2mV8QWob1lV9P/vffmD3RO2Gl3e7EUGvj6pkJvL7HRssnO9i0ZZ//GGdDe0B+uTs8fPqrz4hrN/PErQ9z3b039/s7FeETaqdJA2uUUhp4RGu9vPsOSqkbgRsBbrjJOIulAAAgAElEQVTrWs65bOGwNVIMj65D2U31TcyZPzXg/XGnZlG8opLJV+aSNMVKwuQ4iv9RSeHS8XjcHm8t0iuHKTo/HZMv4EwGhcnhZsf2/dx26R1BixZNRhPmBCPtNgfKqNBujTnBiMkY+J9fKPOjAHz8ygd8tdDA1GwLXy20s/aZtyicN837ptakxyby2QPbSS6Kx+F0EGeOoXlPK6fNncnk0kpmjk8DwJIWz8zzjhvuX7OIPpJfUawgL517r1sUsO3mZ9f12K97OUHZnnLyv3Os//1xp2ZT8nwlky7LJmVaAs3727DXOql5t5mGSc0k5ydgK2/F3ezixG/kMn629+KwssnFR78t5Z4f/rHXwutQMqyv/HJ8doA/XnM2APf+Yw0/OjWRW89I5t4PmyDBxK1Xnus/T2VVA288sJXkonjcRhcGp5EYh+LhW5byxeEm1j6wmnaXm4tuWYJBLuRGXKidpgVa6wqlVBawVim1S2v9YdcdfEG0HGDF9qcGPtYqwqr7UPaBTw+y5/USlEH5F4uccFIuDett7F1eHvD0HFtNbHxzp3fEpw0cZiO7qr0TITbuaaVyfQP5y7KZdkZh0KLF5PQkWssdWA0xaLdGGRVt5U6S05MC2tj1sdzd5QeJT4jnlDNPIcYSw0cvfkhbZSPtDgcfvv4Bpskm/vymkzanh40fr+O6qeNJSYwDBT+5ZTGf7jiGW5e/TF11C8lZqTx925VcePLMkfp1i+gi+RXFtpdW8fA7WzGbj/zvyOYJ/FcQrJygtNTFgTWHKLzQu37lnMuLaK93svfv5WgPGE0G5i84wf/03J6KMhpq6sk6JQVbVqw/wyo/aUBZdZ/zK4WSYZ37vvTIK2w5uJOERCuzZk2l+vMSCnwj2LWNNl77YCMrl3lv4V8zL55lKzfyrYsXkJ7srcV69vbLePPTWdy6/GXKqxsZl5XKvd//BheePJMLfZ/12NrPePOvqzDHyM2i4dB6cj3HX3NSSPuG9BvXWlf4/lmtlHoFOBH4sO+jRDTpPpSdd0w2bo+Lfa+XkjU3zX9V9J27ru8xTO2fXuCXN7L/ywM8+eenqbY04bB14GpzkXFiMpPmTwgoFn/x0ZePjGrVNWOyGZh0aRbWFCO121up/rCReJXIGw+sxugblW9vaiU70crCqYW4J0/mpInpjEuJhxYbafmZHLPoWO79xxpOOMV7ldZpXFITm3Yc4PO9h1h+59WkJlmZP30iuSbFqhvHc/NrbZw4Y9KI/a5FdJH8im4b9lRgnDmR9Lx0/7bCrNSAfYKVE0y/ooAvH9tHelGKf2THZDfzX7/9YY8M65xi4NIbvs4/H13JgYNVOGwdWBLMtJTbOfZ70wLO3X6eg0d+/Xes8VYy8zKw1bfR8YGd8YszSc2PpaGknaoPAou8wdtxqthazNMPBJ8c9unXP+HiQgMZCd7/9WYkmLi40MBDL77vz6/05Ph+8+vbi47j24P4XYte5IReYN9vp0kpFQ8YtNYtvu/PA345+NaJSOheXGhNiGPc3DxKVlSz8Rc7+5x19uNXPvBPL5BbNBFjrIGshelYMo20HGqj9j/NVG9sILXAe+VWs7OO0h1lTDg1m8SiGBwHTbSUtLHn4YOgIS7VTNb0ZNq22nlo6akkWC09PrM367bsoaLawXPbqgO2u/QW0kwOnnrtY2698jx/OE3LsnBxYbt/uxhbJL/Cp6mumcaaxj73qa/t+32AS0+dQea2Yjh8pE5/+T8/YMHVR25ZHS6pJD9/TsBx2TPT2RdziIa19l5nzq6vbqClvoWPVn+I/cB+Pn3DgMGiyDrLm1+OGjf2VRXgPHLeNpsdt9WBW7mYf5d35OnAIzbS85OpeqOGskYXMSkmxp2RTd17Nrrra9JYya/RL5SRpmzgFd9/CCbgOa31W2FtlRh2wYqjO2pdTJ0zJWDdo66c7U5Kdh9k82sf8H8LU/jJ82voiLEQVxhD+14nHDLR1uzAnGjkwOulHH+ptyC7w+UgY2oiZ99QBMCeQ9U01rTR8G495988AZdbs3VbKxMaEwfUYQL4159+0GNbbaONZbffx8MXZ3Dzaxu5+PRj+x0CF2OG5FeYrHv0Db42a0Kf+xx/xux+z5MYH8vFJwc+LZaTlsiBvUeeFtOtHUEf7hg3Oa/X/AL44JHXWTQ1l+J1m7jphATuXPsl+V/Lp3CBb+TGG1Hsf/Mgucd7Z89vrm/G2eIiIdfqHz2ftDiHQ29XcdGtkzAZFC6PZtsXraR1GxHrj+TX6Ndvp0lrfQDoexILEfW6Filaks00ldg4tKaa0xeextq/vwFKoT0aZ10zKQneaQdc7U6ayis5JqkDV6uZq2ZZ+Pt2O2deOx2D0YBSiqbWdspqGtj50EFaktpJJ57Klxo4admRJ9osRhOWJBPNVQ7sdie2She1b9cwf1r/gdqb2kYbN93zLMvvvLrHVdlPH3ghpCFwcfST/AqfpEQry86Y0/+Og3BC0XhOKBrvf73iw218turQgBexzUhNpLm+gevnxXHLacnc9UEdhrjAeqmc2VnsffYQNTvrsdNCW1UHNf9pYuoFR26JpUxMYF/VQRpL20nKMdFc6aLu7RqmzzmOd/7+BrHtHdTZnSz+yWUh/XyDya+nXvuYay461X+cZFhkSBXZUcLpcNLmW3bD7XKzfd1WPL4JGJurG0mJMTMrfSJbH9lLm81OdnoSi+fNYmFuOl/pMuFibIzZP7zceQW0clkGGQkmam0u/r7NRk2xjZyp3pqi5PhY6kvMuNvc7PhzMUkJVqakphGXfGRulA6XC3u5A6fNzcv/c4AYk4GsGAuVIQzd9+bp1z+hobKMh158n/fXbw24KnvkgRKKD8Xx3DZHwDHdh8CFEKND0cRs2hPi2Pb4LlqabCQmJzD3uBk0llSzruSdXo9rrKpn555d/nyYmh7D3n9XotqVf1qCttp2EuPi+ezeHXQ4nGiDgczZabQdtnPgsHe0q+ZAHR6H5p0/l+J2ezAaDcSZzOx37SPLaOHvP17CD//+NuuefYeSnaX9/jyDya+8qj0ANFSWSYZFkHSaolybze6dQREo3VFKXal3wkWnw4XjcD1W3xT
97lY703OODBX/17zJZCR7H6m1xJiwxg58TbJgRYtLJsfx5vMlGK8sID0/gboSG+Wv15LgcDEny8C+hja+d/VF3Lf637AY0vMTSHbGUr2hgaLkBFZensrNr7Xxwh9+3OuVUtersPTk+KCvX/tgIw9/PYNvPPcfls62BrTxplPTYNzxAaHSfQhchrqFGD1+dcVCWtocwPkDOu7RhjpMniMZdvfCNG54tw5rCkyelU79wVZKt7bws6ULuffJVUzONrCj1oPVrsgdF0vaxHjqD7bi3BrDL3+4jCdXvcOfzo/jtrftPPmLm0lLshJn8T4Z96drz8XW5qDphCksveNvw5pf0CXDvi4ZFknSaYoArTWuDpf/+52fbMfe7Fucta4Zg60do8GAw+Ek12IiPs7b4ZmZncIZx0/xnycnPSmsK9X3VrSYF2OlfU0bH1UeoiAnjTSHiYIcAw6XZnq64vV/f8bvL7+Ev6xe59/ntNwJTB9fFlJhY+dVWNeiyO6vO4ezF01y88SmZlbv7ghsY9WegPNLYaUQo5fJZCQ1yTrg4z7aur9HhlnbjJS8WEXdW80U5KTxp6sWs/zl95mV6c2w43ONNLlj8PzbycbKSv8+O/cf4mvTTJw40crXpnWw+oMtARli9rXxiVc/Gvb8AsmwaCGdpjCpPlRD6bZi7wsN1bvLsMZ4r0ham2xMTPbOqK01nDEtj+lFuQDEmMczLjMlUs0OEKxosbvdpVVc8P17MCXCo5fE8Z1X7ezasY/CWy7j7d9+DzhyhfSbEAobu16FdS2K7P66czj7p4vy2NzQ0u/IlRRWCjH2hJphn+/Yx4QuGXa47jBP/fxOpk7wzuJd22jjD4+/0m+GhCO/up5XMizyjHffffewn/TLmq3Df9Io4PF4+PKjbVTsL6ey+DA7PtxG8UdfUrxxDwfW72LfO5tp+HwfFet3Yiip4oppeRyfFMe8pFiuOmUGcU4Hb/17I9v3FGO3t7LslFnc9JX5TM1LJzXRSmqilaQhrGZf22jj6rsf59z5M3q9Hdd9n1COCWZ3aRXzr/0Nn355gCxjMxdNNXPJNDMN7Zpmh+b97Yc5d/5Mrr77cQ5V1TM7roZzpnqvFK0xBupanGytdDF1YnbA5//tpXUUmSu5ZGY8dS1OHlu3n0XjnT1eBzvXKXOnBG1r5zkHcowYoAkn/SLSTRguR2t+DdXnH33Bk398hlce+xefffQ5uq2DK08f/MMc3Q0mv0I9rrvO/LpowRx++sCLpHgaemTY25+X8/K6zzh3/gyefv2ToBnySUkbf135XljzCyTDwi4hG1LzQ8owGWkCKg9WUV3mHb51d7ip+OwAFt9Mq/YmGzkJ3o6MR8N5M8aTk+rt2aecOJWCLhOy9eXNT3dw+4pXmbw4m9Pys6grsXH7ilcBhm2m6u7DwKHsE8oxwdzx4Iukmexs2llCkgV+uiCGrVUu5uUauG+9G0/9Qf+5XyqrwmTQPW/zdStsvOaiUwOupi6aFstDH+3n4R94n2K5Zl48f/5zMaUp8UGLJHtrf6+3Gfs4RghxRLAZudc/8CVvfrojovkV6nHddebX7fe/wKadBzG43T0yzF5ZzOzcOJ567eOQ5lcKV36BZFg0Oao7TQf3lGFragWgctchXM1tAHQ4O7B2uLD6aoWy42O5ZFqe9yCzYt5VZ2Lx3UobLn9ZvY7Ji7PJ9M0zkjklCRZ7tw9H6HQfFv7WxQvQWvdagBhs6DjUod7dpVVs27Wfl5fFc8GzNm4+LYOFc9P87/+osYnW9Dldzu0t/A7Wnq6Fja0OZ0Dh+es7bVwx20Rdk43vv2Rj+bI8/uvMdBh3/IAevQ1liF4I0btgM3KnTE8Ia351L6LWWgfNuIFmWNf8+vrK/Vx94elk2HaxcO6RVQZ+UN/A45tb/eftvH3WvT1dHy4JV36BZFg0GXWdptbmVg6XVALeIuriDXvQTm8Rnb3FTrbVglLeWqGitHjm53mfKJs4r4Ap4zMj1u7iynpOy88K2Jaen8BHlYeG5fzBigSBXgsQu84HMtDCwjsefJErZpuYm2MmLRYe/qSBF3e6AvZx6S1cM9c8oPY8/d4WTMrjv5oqr23BgIdHNtcwLlEx//5DpCXGyaO3Qoyw7isKAMSlxFC8rX5Yzt9bkXPXUSQgaMYNNMO65tcVs0088/Z60pNiA0ZxmlvbsRpdA2qP5NfYEDWdpvIDFbic3v/x1h6spnpPOUopPFpDcyupid57ucrt5pzp4/3zCn3nnLnkdFv4NRoV5KRRV2LzjzQB1JXYKMhJ6+Oo0AQrElzy3HqMBsXyXgoQv3mMlYce3M/vbpjgPyaUwsLOq7SHrvdOZ7D22iROf9zG87/5XkDR5LLb7+OaefEht+eaefG8tq+FF/5wW4/CSu/VnDVgxEoevRXh8p/7V0W6CVHH1O5m5/MHmHVVoX+bvdFJYZjya9nKwJHwG1dvwKM1r1yR7N9n6fM9t/WXYd3z6+YTY3nuSxvP/+bWHvk10PZIfo0NYe00tTa30lTXDPgmXHxvK9rtnXDR3thKjq8j5PZ4KEqLZ0KK9z+cE5PiOevq4AsejlY/XrzQW8Pkm7uorsTGgdVV/P7yS4Z87mDzKWWa25mTbWRaVlrQWWaVy84Vs028tsPGrVmWgFln+7ry6bxKy00wApCbYOSK2SZuv/8FXvn99wPaA7D0iTKWL8vrtT1d9wn2+b2NoMmjtyJc7rvm7Eg3IepcUJTDlfc+R8Mpzf4ZuRt32fjxrV8d8rl7W8S260j4meMa2FblJiMh3b/PmeOcPbb1l2GDza/e2gPJkl9jjNJa97/XAP3g5q9rALPbzbETMvzbLz5hKmld5toI5xxD0ejNT3fwl9XrKK6spyAnjR8vXjgs9QBfve0BKqqPLHbpcntobrHx/GUJnFSUQ63NxYIHyoizxmEyesOguqEFt9uDBwPjMhL9x+ZlZfR5/zx/8R10OB09tptjLJSsviegPfUtdlJMTuqdZgzaFbQ9zW0OUkxOGl0x3qHrLp/f9Yqvc0bypc83+6/wOrctW9n/I7sizE695ej5Y/7k/uEPxaPAvFsepLqpiZZmG4lJCUzOyeLD398w5PN2zy/wZpi9zc7HP5hARoKJ9Xsq+eYLNpISEwIyrMNNQH5B3xk2mPxqdMWQZLUEbY9bmUiP6ZD8Gu1y5sLkM0PKsLB0miR0Iuvef6yB8s3cesaRwsZ7P2wKOstsuHQdlv7Gc7UsnW3l/849MmP5vR96i8XfX781YOi6a3AE+zl+/kYl26rcvHLduIj9bCII6TQd9Tpcbly+OwUAZqMRk8kYls+KdIZ1v6121knHEF+3LaA9f1jnLRZfdXWm5NdoN4BOU9TUNInh09fjqd2f2thdWsUFP7qPNff/2H9Pv7vuywCEIpRZb4MVi3cNjmA/R3VDKx1uOOFBefRWiJFkNhkxh6mT1F1vGZZ+aCcfbzsQkEXhyLCeD6ls7jFtSm/F4n39DJJfo5+MNI0x9/5jDa+t/YCLF53JrVeex9duf5CDB/YxcXKh/55+f8f0J9iwdPch6F
D2EaOIjDSJERAsi4Y7wyS/xqABjDQZwt0WET1qG22sfm8Dd5xqZPX7G/j0yxI+37GPtDjF5zv2sdc3wWdto42ld/yNuqbWgPlTXvtgI3W+ea/60lthZ2cRZKj7CCFEp+75VdfU6l8CpWuGdc2vzuMGkmGSX6IvcntuDHn69U84c5yTSYluzsxzcvPvnmZ6uvIvtNv5BEl/c6P0d6UWyuy1MsOtEGIguufXU699zL+37u2RYacfM7XP+eD6yzDJL9GXkDtNSikjsAko11pfHL4miXDovEr73WluJqWamJXq4IkNjSSmGgMW2l2/vaTPuUhCmcsplNlrZYZbMZIkv0a37vn1lQIXt7z1CbUNTQEL7e7asY/DVbU8tbT3+eD6yzDJL9GXgdye+xGwM1wNEcNvd2kVBUt+xt6yav9VWn6qkViTgae3OpmZaWTxNBPTMowsmW5merriu/c8zcIJ8LPXq5mf1U6GuT3oEHSwIfCur4WIMpJfo1Bnht373NqA/MpPNdLe2kxRGiyZbvZnWFEatNla+Nnr1Zw1kR7z03Vm2IMvvN8jryTDRChCGmlSSo0HLgJ+Ddwa1haJYdN1Ucp2h4sv97bw2g4DBgPsq3WRZFHceZrxyCKVn7ppdtahClNpaHUyIdHI1goXx/610j83CvRcaHcoC/8KEW6SX6NXZ4b94631eFwOf355PHCwwUWTXXHnaQZvhuUY+MunHuwuNxlxGtwd7NhTQvGhuB4L5Lo8m0kzO4e88K8Ye0K9PfcX4HYgsb8dRXgM9JHZ7otSXnbBaSwa18YtpyZSWlnHK3ugxdbK9PEp/mVovnWoks3lLt7ZY+N/To/h1//u4LqT0okvmB8QIt0X2h3swr9CjBDJrwgbzLQlXTNs8fOtXHdyOv9zdgqllXXk56Zz2dOHmZNtZOHcHAAq65q55lgPL27v4I7TYvj1v21cdUJq7/l1cdKQFv4VY1O/t+eUUhcD1Vrrzf3sd6NSapNSatPy1fIEwXDrXpzdn+6LUj7/9gae2+Zg1r2HOOuxBn6/rpHHPuvgxIdrOOHBak54sJont7Sy4WAbx2bD+EQ4Nhue2NTMui17erTlSGGlodvCv/IEiYgekl/RYaD5BYEZtmSakUc/bfDn18w/HeLTg+08uaXVn18nPlzDY1ucgB5Qfj312sdBtwkRTCg1TQuAryqlSoAVwNlKqWe776S1Xq61PkFrfcKNixcMczPHtoE+Mtt5hXbzibGAd1FKk3by4B3XMj4jkXdunsSc/Ay+WPErDr72JzY9czebnrmbLc/+gpkTM/jmHAuT02P45hwLEzISeeLn1/VoS+divBdNi2Xbrv1ccax3eZxr5sWHPDWBECNA8ivCBjNtSfcMu/0MK3EmTUaSlXdunsT4jES+XPHLHvlVND6DJ78WH3J+XTMvnpffXc/q9zcEbJMME73pt9Oktb5Taz1ea50PXA68p7W+KuwtE34DvQrqvELLsBrYV+ck02rgitkmvnvP032ep3uxeH6q0f9ob/e2dBZWvr7TxhWzTdBhB2S+EhFdJL8ibzCjOMEy7GvTjCQr27DmV+fi5mfmOWXOJRESmacpynVeGQ3kkdnPdpexwdnBo5sbMSiNRysMStHhrueab03u9TxrN+wMKBb3eKCmzcPspp29zk9SXtuCAQ9PflFDVmq7vw0yX4kQYjD5BcEzDDTTMoy9nmcw+QVQXtvO5nJ4o1jmXBL9G1CnSWu9DlgXlpaIoPqaeba3P+iS1fdQ22hjya1/Jl630qbiOW/BccTXbevzPItOnMGicW1BFsmc4X8t85OI0Urya+QNJr+gZ4aV2wx845h4/6Lfkl8iUmSkKcoNdubZp1//hExzO02tHWTEt/NSkAUnu59HZrkVQgynoWRK1wzLjYUnNrl7LPot+SVGmizYexTqvEIzOppYfomVG19tw2NJZtWfb5XHaEV4yIK9YhhJhokRJQv2jm2dV2iLp3lnyl08zUyGuT3oTN5CCBFtesuwh17sOZO3ECNJOk1HobfX72RrhZ2Txit21HRw0njF1go7a9bvHNR8KUIIMZJ6y7B/vrNJ8ktElHSaosBwj/6cf9IMfnB6JgtmjmdmQR4LZo73vj5mSo/5UmTkSQgxFOHIkGAZdvOCdNwuZ4/5niTDxEiSQvAoMNxrHvVWEOnSW7hmrtk3z0m7/2pN1lsSQgxWONZsC5Zhza3tWI2ugPySdS/FSJNOU4R1nS13uNY8CvZYbed6S11nvV3y3HqMBsVyWW9JCDEI4cgv6JlhnfnVfb4nWfdSjDS5PRdhI7XmUV8z4cp6S0KIwYhkfsm6lyISpNMUQcHWQQrXmkfrtuzhuW0O/+KWx/61kq0Vdi4qJOyfLYQ4+kQyv054sJqnP7ezY0+JrBknRpR0mkZY16LFzqsngKVPlKGUCtvV0r/+9AP/wpabnrmbay46jR+cnslJRTmArLckhOhftORXZ4bddGqarBknRpTUNI2wrkWLncWOD3zSSIrJyfz7D5GWGDciM9jK7LlCiIGKlvwCyTARGdJpGkHdiyZf+MOP0Vqz7Pb7ePhiKze/1sYLf/jxiBQyyhpMQoiBiKb8AskwERlh6TRddt9H5CUqXB0dnFaURpzZxHknFGKNjQnHx40agUWTRx75775NrpKEENFG8kuIMHWazvv+bwFwuTrYfmA3LqeDl559G4XG2FZHSkIshRkxTMlOZPrETPJz08PRjKjSeZXW9ZHZzkf+X7o8yb9t2Up5bFYIEV0kv4TwCuvtOZPJzKSi2QBMmX18wHvlB3ZT1mbjhXWfou2HsLe2kB3nAe3h/DlZxMfGMH/GROIsR8foVG+P/M/JNgYtZJSrNSFEtJD8EsIrYjVN4yZPA3p2plwdTt7Zthl3q4O/P/UhJqNCtdaSmhDL7HFJzJyQyoSsFHIzkiPR7EELVrRYXtvO5nJ4o1gKGYUQ0UvySwgvpbXuewelYoEPAQveTtaLWuu7+jrm0Q8P9H3SQSresQV7cwONB7ZicrfhaK4nJ9GMR3tYMn8iSVYL0yZmYYkxh+PjhRC9OfUWFekmBDOY/OKT+8OSX0KIKJUzFyafGVKGhTLS5ADO1lrblFJm4COl1Jta60+H1MhBKJg5z/vNyecEbO9wOnhhw/t4ajpoXLuROIsJT0sNmclWjitI59iCTNKT40fNffbaRhs33fMsy++8etS0WYgoFTX5NVZIfomjWb+dJu0dirL5Xpp9X1F1JWaOsTD7tAu8L868BACPxwPA5q2f8O+dVbSU7yFOdeCwNZCfEQcaFs/PJzUxjtyMZMwmY6Sa34MsQCnE8BgN+XW0kfwSR7OQapqUUkZgM1AIPKi1Xh/WVg0Dg8E7U23hcacFbPe43XR0OHF3dLD8P2/hcTloLfuUBGsMJmczE9ITmD0pneMLs4mPjSE+zjKi7Q7XAphCjFWjMb9GK8kvcbQLqdOktXYDxyqlUoBXlFKztdZfdt1HKXUjcCPAVbf9ijO++s1hb+xwMBiNWIxxEBvHsectC3jPYW/D1eHk3e3rWfX+IVqrd5Fi0
bjabczKS0IpxYXzJpKZmkBCnMXfMRtOweZCkas1IQZvoPn1yO3f4MbFCyLQ0tFP8ksc7fotBO9xgFJ3Aa1a6z/2tk+4CsEjpcPhoLWlEbfbzYH/vIG7ox1HTSlpiXHEKweFOYkUjU/jpGnjMBoMGI2D60zVNtpYdvt9rFyWSEaCiVqbi2UrW0Z0ll0hBiVKC8G7CyW/pBB8cCS/xKg1nIXgSqlMoENr3aiUigPOBX43xCaOKmaLhRRLNgDpS24IeK+lsY6qtla27trE41v2Y2+sJjvRjMdp5+TCdOIsJpacOj2kUalgc6HIvCdCDJ7k18iR/BJjQSi353KBp3x1AQZgpdb6tfA2a/RITEknMSWdzLyJAdvb21rZV12Brb6alx//EKUAWy0ZSXGkxWpmT0hhUlYy84rG+Y+RBSiFGHaSXyNE8kuMBQO+PReKo+323HCrq6rA1lRP3f6t2GsO4mhpIDfJhHJ3cM7sbCxmE2cdNyUsNVNChMUouT0XErk9J8TYMszzNIlhlp6dR3p2nn+JmU5tthY+LNlDW1MdTz+2HqPRiKulhsykWPKSzMyfkk52aiLTJmVHqOVCCCHE2CWdpihiTUg8sqzMgsDh7KqyYlZXH6Z54w70u59hb2kkN9GIyaBZfMJEYkxG5k2bgFJHzwW/EEIIEU2k0zRKZE8oIHtCARx/asD21uZGVu7YjLOtmfs/3ATag9XVTEKchUmZVr9yA5QAACAASURBVE6fkUtKQhwTc9Ii1HIhhBDi6CCdplEuPimFmf5lZb4W8F7FgR08VlZMy+EDmNtLaGtpZFyyEYvJwDcWFGI2GSkcnymjU0IIIUQIpNN0FMubPJO8yTP9r7XWaK2xNdXz0JYP6LC30vHGpxgNBhK0jdTEWCZlJHLWnPFYY2PISEmIYOuFEKJ31/7lLeKSM/2v62uqefi7Z5CWJHNCifCRTtMYopRCKUVSagbHnLM04L0OhwOP9rB3/5es37gHW20pCbRht9mYnh1LjMnIpQumYokxkZmSIKNTQoiweWPDfrYfrPW/zkiO47pFcwN3Sspj3tV3+l9uW7ea3/7zfbJSk/zbjpuSxbnHFYS9vWLskE6TALwTeAJMmjWfSbPm+7d7PB7stmbabM386uN3cDlb0PWfE2sxk252kp0ST26qlUXz8jGbjMRZYiL1IwgxLJpb7ZFuQlSKj7UMerWDgXrl8ypmXfoT/+u3nvsTXz/FTue1mtbgcXsCjpl95ldpm3dmwLYXX/q9dJrEsJJOk+iTwWAgPimF+KQUMr96fcB7LY112F0uNpTu5s1Xt9HaUEOGxYOz3c68SUlYzAa+duo04ixmYszyn5oYHW59tSrSTYg6WmtSW4v5zbfOGJHPMxqNxCel+F/PWvQNbn11nX+EW2tN4elfDThGKRVwDIBBGXB2uMLfYDGqGVyukDtDMrmlGHZul4v66gra21o5uOEt3K4OTG01JMbFMD5JMT49gROKcikanxHpporhchRNbin5FdxnbzyDvbFmRD4rY8psik4a+iziuz56nfqDu4ehReJodsrJJ3PjtVeFlGHSaRIjqvZwGc52Owc/W0eHrR57SxN5iQpXRwenF6UTG2Pk/BMK5TbfaCOdJiHEKDUrL4lTCzNkRnARfTJyJwCQV1AUsN3V4eTL4j10ONp56Zk1KDRmex1J8bEUZsQwJTuRGZOymCTzTQkhhIgQ6TSJqGAyx/iXlSmcc0LAe4f276bMbuOF9/+Dtpdhb20hJ86D1h4unJuN1RLD/BkTibWYI9F0IYQQY4R0mkTUGz9lGsCRJWZ8OpwO1ny5GZfNwaNP/RuzUYGtltTEWOaMT2bmhBQmZKWSk54U7LRCCCHEgEinSYxa5hgL0+f5lpU5+ayA93Zt38zndQ00bNxKjHs/9lYbmRYXJqNiyfyJJFpjmTYxS57qE0IIETL5P4Y4Kk2e5RuVOvncgO0dTgf/3PAenkoHjWs3YY0xo1tryUiKY15BJscUpJOeHC+zCgshhOhBOk1iTDHHWJh92oXeF2ctAbwTeKI1G7d+wgc7qmg+tBerwYnT1kR+RhwAS07MJyUhjpz0JMwmY6SaL4QQIoL67TQppSYATwM5gAdYrrW+L9wNE2KkGAzeWY4L550esN3tcuHqcOJydfDwJ2/i6XDQVv4pCdYYzM5mJmQkMic/nXmTs4iPs2CNlWkSoo3klxBiOIUy0uQCbtNab1FKJQKblVJrtdY7wtw2ISLKaDJhNJn4/+zdeXxU1f3/8deZJftC9g1IAoGwiaiAu+KGG4pVSt21VnGpWqv91qWtXX5d1NbWtVa0Ki6IuCuIQlW0YtlEUJEdEhJC9m2yTTIz5/fHTIZMMkkmYSYzST7Px8OHmTv33vkkgTfnnHvuueHAUWdf6vFeS1MjtrZWPvpuLW99UkJD+TYSIsDW3MARo+IBOO+YHJLjo4mODHM3zMSAk/wSQvhNr40mrfVB4KDra4tSahuQBUjoiGErIioaiGbiied6bG+zWmmor8Fht/OnNR+gbfW0VhSSEBtJrMHK2PR48kcmMjM/E4NSA/Ysr+FK8ksI4U99mtOklMoBjgLWBaKYoeQvt15GQ4Oly/aYmFjufeJVvx0jQos5PJyElHQAkn5wg8d79TWVHGxq5OvtG3hm4y5a6ipIizWDrYVjxyYRFW7iwuPzZVQqQCS/fCf5JYR3PjealFIxwJvAHVrrei/vLwAWAFx51x855cLL/FbkYNTQYGHM9Y932b732dv8eowYPOISkolLSCY1K9tje0tTA7vKSrBUlfLmc2tQSqEbKkiOiyA5EiaPHEFO+gim5WUGqfLBT/KrbyS/hPDOp0aTUsqMM3Be0Vq/5W0frfVCYCHIs5uE6IuIqBjnY2Vyx5M/3fMp8lWlB9hYV82HWzfT8vk3WC3VZMSZMThsnDkljXCzkVlH5bmf/i66kvwSQviLL3fPKeDfwDat9d8DX5IQol1SehZJ6Vlk5x/hsb3JUs+nBTtprqti0b83YDAYsFsqSImLICs+jOljkkhLjGX86NQgVR4aJL+EEP7ky0jTicBVwLdKqc2ubfdprT8IXFlCiJ5ExcYdekbfSWd7vHewcA/vVJZSv+579H8202ypISPWSJgB5s4Yjdlk5KjxI4fL6JTklxDCb3y5e+4LYFika7DVVVVyoGCX1+09uf3CY7E5ul5RMBkUj73nfc6rTNocujKyx5KRPRaOOdFje0NdDUu2fUVLQz0tn2/EbDLRVu8cnRqTEs2JEzMYERPJqLSEIFXuf5JfA2cg8wskw0RwyIrgARITE+t1AmRMTGy3x2iHjaplXa8gaIetx8+yOTTZt77YZXvhE1d3e4xM2hx+YuITmOR+rMzFHu8d2P0dzxYWYDm4l7DWfTTV15IVbyLCrJh/4jjCTEbGZCUPl9GpYS/U8wskw0RwSKMpQPrT0xmRki4hIIIiK28KWXlT3K+11miHA0ttNf/c/BnWpiZsy9diNBiIo4ERMZFkp8Rw2hGjiIoIIymItQv/k/wSwjtpNPmBt2HiqoPFKIMRZfR8
TlnHIeebz56G7vC+ttmo+tM8QBMef2gCr83S8/C2EP6mlEIZjcQnpTD1jHke77VaW9AOBzv2fMfa9TuxVOzjpXO7OZEIeZJfQvhOGk1+4G2YuPaR60m58G4iU0d7bO845KyNRkbd+rL79cEX7iDj2kdoLdtLVMZYr8cIEWxh4REA5EyZSc6UmUGuRhwuyS8hfCeNpj7y1iurqSyntGgv6aPGdHtc8ct3o1ubcdjt3DLH9Q+NhraqYkyJzkULtcOOo80KQFN5oXMfuw2H3eY+xtHWCkYjCkhMzXBus9vY9+jlGKM9J/A6Wq3d1tPfSZtCiMGtc4bVVJZzoGAXRqOx2wzzV36BM8OU0eiRXwWPX4V22D0yrKf8AskwERzSaOojb72yb564Gbvd3uNxurWZjGsfpbV8H1HpzmDa/+xP0dqBUu2PzdAoUxgYTZji09zHKoPRPVGy6PmfkXHNI7SW72N03kQATLu+p3L530m+8Jcen1n26r3d19PPSZtCiMGtc4Z988TNhCePxlq5v9tj/JVf4MywpHN/5pFfpsQsSl+6yyPDesovkAwTwSGNphDTfneSwRwO4O659XIQymgiKtXz8RwGY/e/Xpm0KYTwt/7ml8Ec3iXDesovkAwTwSGNpg58Wfej6mAR1X/5kcf72mGnfNkjZN36pHubvbGGivce7PC6luInr0Y77Jhik53bLJVUvvcQyuD8NTia6zn4wh1gNJJ+5d88PqO53NkL1Hab8xKd7v5JDwdeuRdHaxMOh51fXTvH6/chhBh6+pNh2mFn/7M/RZnC3RkWqPwCZ4b1lF/gyrBO+dX5+xAiGKTR1IFP634YTYz86SKP99uqiildfE+XHk7ynLvA4Lq7xG7DnJLDwRd/Tsa1jwJQ/OQ1JJ9/p3t/jUYpA2Wv/YbSF3/u3Ga3oR0OzMmjAFBGEwZzWI/L9Tlam7pcwuvyfQghhpz+ZFhbVTFaOyh79T6P/QKRX+DMsN6WG3W0NpE2//955FeX70OIIJBGUwfdjSJhtx3q8WhordyPUgbMSSMBMCd6efq8UoQlZmEKcw5TN5XuRRkMnfZx/d81DK3sNjAYUUYTGdc8AkBr2V6qVjzWc+Fao+02d2/OVl9ByQs/A4edWpPZvZvc+ivE0OZtFAnokmFtVcUe+dVl3GeA88vR1tolwyqXPeyRXyAZJoJPGk0deRlFaq3cT+V7D7l7b7WPXI85aaSzd2ZrBZwLAaLw6OFV/+VHXYLI0WYFh53W8n24DsSUmMWh9HEACm23UfT4la5tzrOUPHcr4Bw2bx8mt6U5G2tVZSUAHYbTFelXPAQcuj0cPG/97c+Kv0KIENcpw1or9xOWPJqS5271yDCtHZ75hfLIsEDlFzgzrOy133jklzIY0Q67Z4Zd9bBHfoFkmAg+aTT1g1IGFAqzqxfW1mpFKWPPBxmM7smR7XefuN8yhwHOUDKYw1EGAzm3vQR0vdOk3d5nb+NPLywD4FfXzvFosG38y48wmMPdoeiNzAsQYvjqkl+msJ4zbADzC5wZ1tsjeyTDRDBIoykEONpaO3xtRTscQalDHoAphOgryS8xnEijqQOF6nF0BsAQFkHJc7dib6rF4Jok6XDYO6xV0k5T/K/rDu1jtwEK7Db3ELOjrZXSl+5CufZpHw7XnfYpX/obbK6F4Nr1OAStFG2VRWiHDYfp0K/Y0KVGT/IATCEGN28ZprVnI8YQFkHpkns75ZcRPC7IBTG/wJlh1Qc88gt6zjDJLzEQhnWjqXPPRLv+2itwT4C0dbrNY9otTwC9Dy9Pv3epxz7e3DJnJqOvf7LL9sInruafy9b3/RtyCYtJIDLVuVhdVs4493ZrUnK/zymECC3eRlY0ukt+de7QTbvliZDOL3BmmNFk8sgvkAwTwTesG02deybVf5nPgadvQGu7u4el7Xa0w96lt9Kxp9TfCYlKOyh54Q6v2/ui8+fbLJUUPnE1BmXwCBmZICnE0OFtZKX6L/Mp6jBCpO12tALsnhkWyvkFzgwrf+3+Lo0kyTARbL02mpRSzwFzgHKt9ZTAlxQ80+9dCtBrD6uz/l4v99eKtnK9XojuDbcMk/wSInB8GWl6AXgCeLGX/YYFmWwoxKDzApJhbpJhQvRfr40mrfXnSqmcwJcyOPhzsmGorTMSavUI4Q+SYZ78lWGhlhehVo8Ymob1nKZgC7VeXajVI4QIXaGWF6FWjxia/NZoUkotABYAXHnXHznlwsv8deqAkZ6JEAIkv4QQvvFbo0lrvRBYCPDM53t7foR1iJCeiRACJL+EEL7pebVDIYQQQggB+LbkwKvALCBZKVUM/FZr/e9AFxaqZEhciMFFMsyTZJgQ/aecT7j2r8EyvC2E8I8bThnT89NVBxHJLyGGl8mZcZyQl+xThsnlOSGEEEIIH0ijSQghhBDCB9JoEkIIIYTwgTSahBBCCCF8II0mIYQQQggfSKNJCCGEEMIH0mgSQgghhPCBNJqEEEIIIXwgjSYhhBBCCB9Io0kIIYQQwgfSaBJCCCGE8IE0moQQQgghfCCNJiGEEEIIH0ijSQghhBDCB9JoEkIIIYTwgTSahBBCCCF84FOjSSl1jlJqh1Jqt1LqnkAXJYQQ/iL5JYTwl14bTUopI/AkcC4wCbhMKTUp0IUJIcThkvwSQviTLyNNM4HdWuu9WutWYAkwN7BlCSGEX0h+CSH8xuTDPllAUYfXxcCxPR0wISP2cGoSQgh/kfwSQvQoLT7C5319aTQpL9t0l52UWgAscL28UWu90Ocq/EgptSBYn304BmPdUvPAGIw1h5BBlV/ttQy237fUPHAGY92Dsebu+HJ5rhgY1eH1SKCk805a64Va6+mu/4L5w1nQ+y4haTDWLTUPjMFYc6gYbPkFg/P3LTUPnMFY92Cs2StfGk0bgHFKqVylVBhwKfBeYMsSQgi/kPwSQvhNr5fntNY2pdStwEeAEXhOa7014JUJIcRhkvwSQviTL3Oa0Fp/AHwQ4Fr8JdhD6/01GOuWmgfGYKw5ZAyy/ILB+fuWmgfOYKx7MNbsldK6y5xIIYQQQgjRiTxGRQghhBDCB0Ou0aSUMiqlvlZKLQt2Lb5QShUopb5VSm1WSm0Mdj2+UEqNUEq9oZTarpTappQ6Ptg19UYple/6Gbf/V6+UuiPYdfVGKfVzpdRWpdR3SqlXlVK+LygiBp3Bll8gGTYQJL9Cx5C7PKeUuhOYDsRprecEu57eKKUKgOla68pg1+IrpdQi4L9a62dddyRFaa1rg12Xr1yP1jgAHKu1Lgx2Pd1RSmUBXwCTtNbNSqmlwAda6xeCW5kIlMGWXyAZNtAkv4JrSI00KaVGAucDzwa7lqFKKRUHnAL8G0Br3TpYwqaDM4A9oRw4HZiASKWUCYjCyxpDYmiQ/BoYQyDDJL+CaEg1moBHgF8CjmAX0gcaWKmU+sq1KnGoGwNUAM+7LiM8q5SKDnZRfXQp8Gqwi+iN1voA8DdgP3AQqNNarwxuVSKABmN+gWTYQJP8CqIh02hSSs0ByrXWXwW7lj46UWt9NM6nsP9UKXVKsAv
qhQk4GnhKa30U0AjcE9ySfOcair8QeD3YtfRGKZWA8+GyuUAmEK2UujK4VYlAGMT5BZJhA0byK/iGTKMJOBG40HV9fQlwulLq5eCW1DutdYnr/+XA2zifyh7KioFirfU61+s3cAbQYHEusElrXRbsQnxwJrBPa12htW4D3gJOCHJNIjAGZX6BZNgAk/wKsiHTaNJa36u1Hqm1zsE5fPmJ1jqkW7VKqWilVGz718Bs4LvgVtUzrXUpUKSUyndtOgP4Pogl9dVlDIKhbZf9wHFKqSillML5s94W5JpEAAzG/ALJsCCQ/Aoyn1YEFwGTBrzt/POECVistf4wuCX55DbgFddQ8V7gx0GuxydKqSjgLODGYNfiC631OqXUG8AmwAZ8zRBaWVcMCZJhA0TyKzQMuSUHhBBCCCECYchcnhNCCCGECCRpNAkhhBBC+EAaTUIIIYQQPpBGkxBCCCGED6TRJIQQQgjhA2k0CSGEEEL4QBpNQgghhBA+kEbTMKaUuk8pJU9UF0IIIXwgjaZBTClVoJQq6/iEbqXU9Uqp1b4cr7X+s9b6+gDUtVop1aKUalBK1SmlPldKHeHvzxFCiN4opU5SSn3pyqJqpdQapdTJSqnG9kfAdNr/a6XUrUqpHKWUVkpt6vR+slKq1fWcQDHMSKNp8DMBPwt2EV7cqrWOAZKA1cBLwS1HCDHcKKXigGXA40AikAX8HqjD+eDeSzrtPwWYhOfz3aJd29tdDuwLYNkihEmjafD7K/ALpdQIb28qpR5VShUppeqVUl8ppU7u8N7v2p+krpT6UCl1a6djtyilLnZ9PUEptcrVU9uhlJrvS3FaaxvOp7ZP6nDemUqp/ymlapVSB5VST7ieAYVS6kml1MOd6nhfKXWH6+tMpdSbSqkKpdQ+pdTtnc670fW9liml/u5LjUKIIWs8gNb6Va21XWvdrLVeqbX+BlgEXN1p/6uB5Vrrqg7bXgKu6bTPi4EsWoQuaTQNfhtxjuT8opv3NwDTcPayFgOvK6UivOy3GOcTtAFQSk0CsoHlrst/q1z7pLr2+6dSanJvxbkaQ1cAaztstgM/B5KB43E+/foW13uLgMuUUgbX8cmu9191bXsf2IKzx3gGcIdS6mzXsY8Cj2qt44CxwNLe6hNCDGk7AbtSapFS6lylVEKH914CTlZKjQZw5cvldG0QvQxcqpQyKqUmArHAugGoXYQgaTQNDfcDtymlUjq/obV+WWtdpbW2aa0fBsKBfC/neBuYppTKdr2+AnhLa20F5gAFWuvnXefZBLwJzOuhpseUUrVAA3ArziHx9pq+0lqvdZ2rAHgaONX13nqcQ+dnuHa/FFittS4DZgApWus/aK1btdZ7gWdc+wC0AXlKqWStdYPWumNDTQgxzGit64GTAI0zKyqUUu8ppdK01kXAZ8CVrt3PACKA5Z1OUwzsAM7EOeIko0zDmDSahgCt9Xc4r9vf0/k9pdRdSqltrkmQtUA8zhGezuew4AyL9gbIpcArrq+zgWNdl9NqXee5AkjvoazbtdYjcIbQHOANpdRUV03jlVLLlFKlSql64M+dalrEoSC7kkPzobKBzE513Aekud7/Cc7h+O1KqQ1KqTk91CeEGAa01tu01tdqrUcCU4BM4BHX2x0v0V0FLNZat3k5zYvAtThH2V8ObMUilEmjaej4LXADzstWALjmL90NzAcSXI2YOkB1c45XcV4aOx6IBD51bS8CPtNaj+jwX4zW+ubeitJaO7TW/wV2A7Ndm58CtgPjXJfS7utU08vAXKXUkcBE4J0OdezrVEes1vo812ft0lpfhvMS4oM4G2rRCCEEoLXeDryAs/EE8BaQpZQ6DbiY7keR3gTOB/ZqrQsDXacIXdJoGiK01ruB14DbO2yOBWxABWBSSt0PxPVwmg9wjub8AXhNa+1wbV8GjFdKXaWUMrv+m+G6vt8rVyNsErC1Q131QINSagLg0fjSWhfjnIv1EvCm1rrZ9dZ6oF4pdbdSKtI1x2CKUmqG63OuVEqluOqudR1j96VGIcTQ47qB5S6l1EjX61E4R4vWAmitG4E3gOeBQq31Rm/nce13OuD3JVrE4CKNpqHlD0DHkZWPgBU4J0MWAi04R2u8cs1fegvntfvFHbZbcI4SXQqUAKU4R3LCe6jlCdc6TQ04Gz+/1lqvcL33C5wTLi045xm85uX4RcARdFiqQGttBy7AObF9H1AJPIvzkiPAOcBW12c+ClyqtW7poUYhxNBmAY4F1imlGnE2lr4D7uqwzyKcncUe5ypprTdqrfcEqlAxOCitdbBrEKILpdQpOC/T5XQY8RJCCCGCRkaaRMhRSplxLtj5rDSYhBBChAppNImQ4ponVQtkcOgOFyGEECLo5PKcEEIIIYQPZKRJCCGEEMIH0mgSQgghhPCBKRAnfW/nG3LNT4hh5MLx87pbMHXQkfwSYnjJHZHHEanTfMqwgDSamtoaA3FaIYQIOMkvIYaXVrvV533l8pwQQgghhA+k0SSEEEII4QNpNAkhhBBC+CAgc5q8UVoRTRzhhnAUoTdnVKOxOqw0Uo9WMg9UCHGI5JcQAgaw0RRNHHFRcWDQhGDmgIZwRzg0QQN1wa5GCBFCJL+EEDCAl+fCDeGhGzjgrMugnXUKIUQHkl9CCBjARpNChW7gtFOE5NC7ECK4JL+EEDDMJoKvW72eK0+/lstPvZpX/vlqsMsRQog+kQwTIriGTaPJbrfzyP2P89ALf2bRqn/z8XufUrCrMNhlCSGETyTDhAi+YdNo2rZ5B1nZmWSOzsQcZub0C2bxxco1wS5LCCF8IhkmRPAN2N1zfXHTvLuorW3qsn3EiCj+9cbD/TpnZVklqZmp7tcpGSls27y93zUKIYQ3gcgvkAwTIhSEZKOptraJ8Tc90mX7zn/d0e9zau1l7RKZMymE8LNA5BdIhgkRCobN5bmU9BTKS8rdrysOVpCcmhTEioQQwneSYUIE37BpNE04Mp/iggMcLDpIW2sbn7y/mhPPOiHYZQkhhE8kw4QIvpC8PBcIJpORO/5wG7+4+h4cdgfnzT+H3PE5wS5LCCF8IhkmRPANm0YTwHGnHctxpx0b7DKEEKJfJMOECK6QbDSNGBHlddLkiBFRQahGCCF8J/klxNDVa6NJKZUPvNZh0xjgfq1119tD/ORwbssVQoh2kl9CCH/qtdGktd4BTANQShmBA8DbAa5LCCEOm+SXEMKf+nr33BnAHq21rN0vhBhsJL+EEIelr3OaLgXkKZFC9NHmL77hw6UrqSipJCUzmXPmz2baSVODXdZwI/klRD9Ifh3i80iTUioMuBB4vZv3FyilNiqlNn78+mo/lSfE4Lf5i2947fmlJJwVyYzfTiThrEhee34pm7/4JtilDRuSX0L0j+SXp75cnjsX2KS1LvP2ptZ6odZ6utZ6+hk/nOWX4vztgf/7K3OPmce1s68PdiliGPlw6UpyLsogYWwcBqOBhLFx5FyUwYdLVwa7tOFE8kuIfpD88tSXRtNlDPKh7XPnnc1fF/0l2GWIYaaipJL4nBiPbfE5MVSUVA
apomFJ8kuIfpD88uRTo0kpFQWcBbwV2HIC68hjpxIbHxvsMsQwk5KZTF1Bg8e2uoIGUjKTg1TR8CL5JUT/SX558mkiuNa6CRjwJ0PWVtfxj7sf5M6H7iY+IX6gP14Ivzhn/myeefDfOMLttDXaMEebMFiN3HD3T4Jd2rAg+SVE/0l+eQrpB/auev0DbEU7Wbn0g2CXIkS/7d26jzbdRsaZiYxfMJKMMxNp023s3bov2KWJAJL8EkOB5JenkG001VbXseGDVTx6SQYbPlhFXU1dsEsSol9WvfMxYy7PIG1mIvHZMaTNTGTM5RmseufjYJcmAkTySwwVkl+eQvLZc+DspV2QpxiXFsEFeU2sXPoBP7zxsmCXJUSvOq9p0tzQTHxep4mUeTG0NBYFqUIRaJJfYjDrmGFN9U3E5no+N3E451dIjjS199KuOCYOgCuOifNLb+33t/2JWy6+nf17i5h33KUsf22FP8oVws3bmiYoqN3VaSLl7gYioiOCVKUIJMkvMZh1zjBTpJHaXRYcdod7n+GcXyE50tTeS0uKcZaXFGPigjx12L213z7+K3+VKIRXHdc0AUgYG0fG9BT2Li5BXZFJfF4Mdbsb2LfkIGdfdHaQqxWBIPklBrPOGZZ9aiYFS0vhR4rE/Lhhn18h2Wja8uUmPi1p4dVvSjy2J1ZukiFuEdIqSirJyZnose3IH+fz8W0bKHypgpbGIiKiIzj7orO5+Ma5QapSBJLklxjMOmfYhEvGoB2aXQuLUUoN+/wKyUbTHxf9NdglCNEv7WuatPfSwLmmydipOdzz2C+CWJkYKJJfYjDzlmHpR6cQXhUjGUaINpqEGKzOmT+b155fChc5V82tK2ig4J2D/OjH83s9Vh6KKYQItv5m2HDJL2k0CeFH7SHx4dKV7CwpIiUzmR/9eH6v4dE++TLnogxyciZSV9DgDK4O5xRCiEDrT4YNp/ySRpMQPehP72naSVP7HBTeJpBzkXP7UAsdIcTAOWA0bwAAIABJREFU6O/oT18zbDjllzSahOjGQPaevE0gj8+JYWfJ8FwLRQhxeCS/AmPYNJrKS8r5050PUl1Rg8GguOCy85l33cXBLkuEsIHsPXU3gXy4PhRTdCUZJvpC8iswQnJxy0Awmoz89Nc38dLHz/HU24/z9kvvUrCrMNhliRBWUVJJfE6nlbxzYqgoqfT7Z50zfzYF7xykZk89DruDmj31FLxzkHPmz/b7Z4nBSTJM9IXkV2AMm5GmpNQkklKdDzqPiokie+xoKkoryRmXHeTKRKgayN5TfyeQi+FDMkz0heRXYIRso2nt6vW8ufhNDhaVkjEqnUsuv4TjZs30y7kPFpWy6/vdTJo2wS/nE0ND50mT4yePY8M7Gzxuvd32wj5iYmK4a949fZpY6cuEzP5MIBehKZD5BZJhwruOORMZHUHpC6VMvDbXY+mAGTNn8MDtf+vT5HDJr0NCstG0dvV6nnl6ITlzMxmdO4XafRaeeXohwGEHT1NjM/ff/Htuu/8WomOj/VGuGAK8TZrc8M4GZsycwc5Vu9hZUkR4eDjGCAOj5qW4Q8iXiZXD6XZcEdj8Askw4Z23nNm5pInCJeVYrc7RnxkzZ7Bh/YY+ZZHklyef5jQppUYopd5QSm1XSm1TSh0fyKLeXPwmOXMzScyLx2A0kJgXT87cTN5c/OZhndfWZuP+m37HmRedwSnnnOynasVQ0HHSpMFoIGFsHDkXZbBz6y7ueewXPPzGA8QlxTL+0uwu+3y4dGW/zt3bccI/hkp+gWSY6J63nBl/aTZxSbE8/MYD3PPYL9i5dVefs0jyy5OvI02PAh9qrecppcKAqADWxMGiUkbnTvHYNiI3lh1F/Z/0qLXmwbv/RnZeNj+6ft7hlij6KNRXi60oqSSyNpW1D2+hqaKFqJQIsmdlekya7O9ttcPpdtwQNejzCyTDgm24Zpjkl6deG01KqTjgFOBaAK11K9AayKIyRqVTu89CYl68e1vtPgsZo9L7fc5vN37Hyrf+w5gJufzk3BsBuOGX13Hcaccedr2iZ4NheDc8PJydywvImZdOTHYkDYXN7HyjgPjwBPc+/Z1YOZxuxw01QyW/QDIsmIZzhkl+efJlpGkMUAE8r5Q6EvgK+JnWujFQRV1y+SXOOQBznT202n0WCt4t4YYbF/T7nFNnHMFnBf/xY5XCV4FeL6S/PcCOx1lqLYz6YTJRmeEogyIqM5z00xKwfqnc+/f3mUyH8zw6cdiGRH6BZFgwBTLD/JFfKZnJWK1W0s9K8HuGSX558qXRZAKOBm7TWq9TSj0K3AP8puNOSqkFwAKA6397LWf8cFa/i2qfLPnm4jfZUVRIxqh0brhxgV/vPhEDJ5DDu/3tAXY+7pO71xGZFk5rjQ3taMVkNpM1NZOtq/a5j+nvbbXD6XbcECT5JQ5boDLMX/lVV9DAjr/VMG7cZFrrW7G2NfstwyS/PPnSaCoGirXW61yv38AZOh601guBhQBLti7Sh1vYcbNmSsgMEYEc3u1vD7DzcTEZUdia7IQlhpGemwlAzZ76LjX297ba4XI7bgiS/BKHLVAZ5q/8ShgbR2xWJNV76sg7Mce9n78yTPLrkF4bTVrrUqVUkVIqX2u9AzgD+D7wpYmhoi/Du74MVXfcp7aqjhMvm+bxfn8mZ+ecnsXW13ajTLC1bR/hMWYMVhPX/vzqw/jORbBJfgl/8DXDgpVfAGPPHc2WF3ZQ+mkV1oY2ybAA8fXuuduAV1x3nuwFftzXD9Jo0IDqddfg0a46hV/5Orzry1B1532+/MvXHPimhFFHZREVEwn0f3K2waxIPy2BmKxIrBV2qlZb/PYzEEEl+SUOiy8ZFuz8aixvxhxlIv20JMJTjJJhAeJTo0lrvRmYfjgfZHVYCXeEg0GHZvBowKGwOqyhWd8g58vwri9D1R8uXQnxDjY8/i1tzXaMZkXT2y0YDSbGHDfa51VvO/ccdy8vJPOspEPhNR6S0usD8nBLMbAkv4Q/9JZhwcyvuoIG9n10gCk/ySPjiBTnTpJhATFgK4I3Ug9NEG4IR4Xg32qNxuqwOusUQeHLZMs93xRArJ0x12QQNzaK+j1N7F18kB3P76fqo0afV73t3HNsrLIybWqmu7fn7bPF8CX5JXoTzPxKyUwmIiyCtElJPX6+OHwD1mjSStNAHQ2hPHocelk4rPgy2dKmWxl3aRbx+c7HR8TnRzPm8gx2LTzAw288AMADt//Np8mVHXuOD9z+N9oqbXBoaZ1hvRaJ8CT5JXoTzPxqP07WUwq8kHz2nBhcfJn8+NbT77LqnY9paWwhIjqCsy46A8Bj25SjJlPwTlGPky21QxOZFkZzmRVt1yijIjItDO049K9Zf24PlrVIhBi++pNhWVmZHDhQEhL5BZJhA0UaTeKw+DL58a2n3+Wj5R+Re1UG8Xkx1O1uYMXLH2K3Ohj/k5Hubd8s2cLUKUdSs8rS7WTLsPAw6nc3kTAlFkOYwtGqqfnOQlh4mHuf/tweLGuRCDE89SfDKr+ppeCtAlJPSGDU2blBz6+OtUqGB
ZY0msRh8WXy46p3Pib3qgwSJsQ695kQS9b5SRQtr6BkVSV7Xy0hIjmMpJnxbPnsG8ZMzO3282JiYjj4cTXhCWHE5ETQdMDKwY+rsbXauGvePaRkJjN+8jg2vLOhzz0uWYtEiOGnPxkWkWwm+fh4Sj+vonJ9bUjkF0iGDQRpNInD4stQcktjC/F5MR77tFlsKKXIPC+ZmNERNOxvoeidcprqm0k4K7LbHp8yKcbMHsX+90toqW4lPMFMyswRlH1aw4zfOo/Z8M4GZsycwc5Vu6THJYToUX8yrG5nI/U7mhj9g1SSj4mX/BpGpNEkDosvQ8kR0RHU7W5w99IAKjfUM/KCFGJznXerxeZGknluEo0LW3rs8aVkJhOfHskp980AoHR/GU3VzcRkRGEwGtzH7Fy1i3se+0XAv38hxODWnwyr3mwh6/xkwhPMKKOS/BpGDMEuQAxu58yfTcE7B6nZU4/D7qBmTz0F7xxk/ORxPHD737hr3j1ERUex67liarZbcNg0NdsttJS1EpkShr3FARrsLQ7MUUYMZs8/kvE5MVSUVHb7eXX7LBxYUUXO6VkexxTtPOD+/Adu/xubv/hmwH4mQojBw1uGbXthHw11De78mHLUZPYtOejOMGtVG+ZII8Zwo+TXMCMjTeKweJt86G2dka0vtrL7mYPY7TYioiNITk/G2GimtaoNh0NjMChaSm3EpEV6nL9zj6/z5zU1NjHqzDTSjzq0z96VxTjCbT0OkwshBHTNlPDwcIwRBkbNS+kwp6iIqVOO5LuXttLSWAQK2io0BqOdxnqb5NcwIo0mcdi8rRfSeWLl5KvHUrOq2T3kvPmLb3ju0RdImhVNRFoYjSVWyv9Th9kQRs2e+h4nQHb8vPY7X2rG13dZGbevD8EUQgxPnddsSzgrtUt+1Kyy8NSKxwDJr+FMGk3C73yZWLl36z4aaxpp+7wVW5MdU5SRVksbk2ZO7vGW3c5kZVwhhD9JfomeSKNJ+J0vEytXvfMx464b6TE5vGa7he9e2uruzflKVsYVQviL5JfoiTSahN/5sjKtt2UI4vNiaKwt7PFBldD76r2yMq4Qor8kv0RPpNEk/M6XlWm9LUNQtLKc8ARzjxMgfVm9V1bGFUL0l+RX9w7sKaGmosb5QsOBLfsw2B3u97XW2OsaSIqN8svnNVnbaDAYCI84tGI6EWFkTB7l/Fop8o4YS0RUuF8+zxdDvtHUUNdAyd6DHtsqC8uxFFehvDzgsrXWQkqcf37hvqq0NGOOj/H6XkxGAsm56R7bMnIziB3hff/D9eR9T7NxzVfYbQ6MJgNjx43BFGXq0iuqr7Hw/P3PcN3/W+C1ls5Dzpu/+MajBzblqMl8s2QLXIr7MSoHP64k/8psrOZmSgrqMZnNpM9O9JgA6cvqvUKI4alzfk0/8RhOPO/4LiM7Yybn9im/wHOEqH0ZlXHXjRwy+VVTUUtFcQW2NhsF63ZgMhjQGux1FpJc/yaOio9idnaK+5gJ5xxFakJsd6cMiOLyGvaWVAHQZrPz3tPLMJgMlNc3ET4iFpvdTua0sUTFRjI6fxQRURF+/fxB2WiqOlhFfY0FgLLdB6ktqnC/11LbQFqHRo/R4eDMiVkeDwBPSY9n+mlHDFS5h2XTjiLKKqrdrzXw6Rff0drhO6qwNBHmanQ5HA6yj80nKsZ562tGTrrPf2ievO9pvt7yNeMWZBE3NooDqyop+F8BeZeMZMYNnr2igzv3Yygp4Iu3VnPudXN6PK+33lXnW3gjoiMwKTNhqUaMcYqw8EjsVjvWNisH9h76/foySdOX3pwQYmjpnF/1e5rY8NxGvt+2lanX5XtkwdisbJ/zC7xnSudlVAZDfh3YU4K1pZW963dga7LSVN9IenQESilijAZOyktDobhn/klEdRzdCSEjUxMYmZrgfn3G9HyP9+12Bx9v2kVrWRMfrd5CU6sNa3gYEXFRjDl2AolpIxiRPKLfn+9To0kpVQBYADtg01pP7/cn9sLhcHBwXykaTV1ZLQWbdmFQipa6RndjKMFs4shRzrsLZqXEcPypkwNVTtAdnT+qy7bzjp3oZU+nNpudFet3YKusxeHQrP5wI4Yw56+5rL6JiPhotIZR08aQkJFIZFQESRnOn+XGNV8xbkEW8fnRADTsbSb3inQMUXisVrts8Qoiamp58uJUfrpsDSddPKvHka/uelcdb+EFuPnc22m12IjOcjb4TBFGLBYbNrvNvY8vkzRDuTcnBt5A5pcIns75FZ8fTWRyGCknx3lkgeMCO5se38SK67J8yi/wnimdl1EJlfyqr66nvsZC6a4SKvaU0FTXRHpsJFprxiVEk5sUy+XH55OTkdjruQYjo9HA7BnOhtSc4ye5txcerOarvQfZ8NkW1tY3Y4sMZ9SRuYzMH8W4Pvwo+jLSdJrWurL33XrnsDvcq6Tu2biLhoo6mqvrSYqJpLWllekjk4gKN5EXEcYfrjwN5e06mvDKbDJy4QmH/qDMO3lKl3201ry/bjv/ffNb3v5iC5V1DcTGRtFqaaNueyPNpVZGTIyhqbSVuLFRNJVY3cfG58Tw1Y7vSY/SXLG4FoNB8drfX+X6P9zQbU0VJZVE1qay9uEtNFW0EJUSQfasTI+VcgEM2siBFVUYw4zEZEfSUNjMgRVVGLTRvU/7JEnHBXZsZiumtgj2v1/mMUnSl96cGHb8ll8idHS8ZGZvs7sfy9TO1uQgIs1zxMQRaaOltY0rFh/wKb/AtwwbyPyytdmoKq2moa6R3Wu+RylFS1UdibFRRGnN1JGJTE+KY9aVp/n+wxzisjMSyc5I5OITDw2yrFi/g/XvfcmOEsUxVx/r03kCenlOa+28TlpYTvHWQhprLIwwGrFaWzkqM4Fws4krc9OZdvJEjAYDRqM81WUgKKUwo/lw21YmX5dFUk4MVQUNlP2tmti8SMJiTdTvagSHZtfzB8AB9h0KZVQQDa2tVsZcN5LUnCgqCptZt/hr/vfheo4/Z6bXzwsPD2fn8gJy5qW7w2TnGwXEhyd47DdqfBaObCsHV1S7gyljWgqGwkOT/Np7Wq88upja0ipGpCdxxc8u9+iB+dKbE0IMbp0vY330s2rqdjcyYkIsyvVPiSnKQEtZK7j+nWyoa6RyTw1xmeGccHu2T/kFvmVYIPKrrdmGrdlOyf/KMNjg3YdfJyEsjLbmFqaPTiHebOSOuTMxm4yYjAYMBvk3tC/OnZnPuTPzId33KxC+Npo0sFIppYGntdYLe9p59eJPaCiqwKwUk1LiGJcUwx1nHYnZZAzZ66TDwYq13/PIu6vZV1pNXVMzuWekkuJqWKSMjWPktCQKXitjzBUZpB47gtZ6GxX/q2PCxZlMmJRKxe56/vvvnSSMjqB4o4XijRaik8KISjPx7APP8fozb5KaldLlFlqDSZF+SgJhqUbaWtsISzWSfloC1i89RxDbe2H5F+V2utX2Ao/9xkzOJc2keOOmbH66rJGxU8Z4PU9PvTkxrPQpv0Ro6nyrfn2VhZxLD13GGnlSGoVLy1DzFfHjo6nf00RzZSsVH9dT
k+1ccfvA5oOUra7m6NnJGIyKtDFRTLw4hcWPvspnH3ze7TIAvmRYf/MrZ0I2DXUN1FXVs2f9DrJS0/jy0XUkToyljTbiY6Oo39bIIz+ey/nHTyYi3DwwP3Dhla+NphO11iVKqVRglVJqu9b68447KKUWAAsA/vrTudy54Gxp9YaQFWu/55dL3mfM3DROykll/f8KKVhTSXx6FKOmueaHXT+BN29bx56FB2hrc2A2GzguZzSmTSbWf7Cb3PRElE2TOy8DZVRoh6byWwt1Za3E5IcTHx+GpbmaZ/70b04+/XguuPECIqMjqauqJzstmbZ6G9quUUZFZFo45VWeV0t8vdV2zdufcWGegXFp4VyY19xlMqcvvTkxrPQpv67/7bWc8cNZQShTdMfb5Oi9T+8lrTbevc8Rl45HOzQ7FxajNRhNBmacON1999zOkiKqyqrIviCVhtQItpc75xrVVNpoprnHpQJ8ybCe8svWZsNus1NdXsPbj77BSFMLi9e2Ed9m5d8/f4KLTzuaSLORXx8/EdNx41l71Bjufu5dKsrriE418K+bfsi5xx2adiGCx6dGk9a6xPX/cqXU28BM4PNO+ywEnD24Lx/X/i1THK5H3l3NmLlp7pGlpNwYwuNNbPu4xN1oqipo4LgjcvnoL7d0e56z7/0nESqKlFzneVa+/y3jL88iJjGCcVnOW1GLNldRvmo7Ra9Gs7fKQmNVA6WfG0g6JpbEsZG0OqCxwOoxQbKdt1t9O6qvsbB11Rp+/SPn5192TAyXvdZ1Mmdvo1Fi+Ohrfi3ZukjyK8R4mxydMzeDPSv2k3HMoVvgRx2bQUz9CPfk7HYez5WbGOlx6X7vkq8Yc3FWjxOvbXYbtiY7MbkRhBmh1Q4N+1rcGeZwONcqmnhMPm31LWjtoKqwnJotBXyyeR80NJOREIOy22jbV8Dv5sQxMT0cS4uD+UvrWXDmNJLio901nTAllwyT4p0FI7l5WRMzJ2b7+0cq+qnXRpNSKhowaK0trq9nA38IeGXCr/aVVnNSTqr7deqIWKxtNupKmnDYHVQVNLD33TIeuvSCLsdW1jZw4wMvs/Deq7hj7ixufvp1rBF2rA1tNNe1EjUpkjFjDgVX1hEJrFpSxP+2bGVfaTXKCJY9jZgiFfVbG2g8aKWhsAWj3UyTpYmoPiyE1j7KlBTj/KObFGPiwjwDH7/yEYXbC93rrvQ2GiWGB8mvocHb5Oj0Kanserm41wfkAu515U49/xRee2YpjnAb1oY2wmPM1BY0Mm2K57nNySZ2fbuHu+bdQ0pmMrZmB4XvlJMwKYroFDOWg62UranF0GJg2QNLSIswY3LNyb1sxjjioiNIOiLboyEE8PdXVvLDSWamjXROWA+PMTAnz8A/3/iUzbuKWXjvVSTFR/Pi8i+Zk2cgPzWcOXktLFq2hjuvmO3PH6noJ19GmtKAt113sJmAxVrrDwNalfC73PREqgoa3CNN8dERVBeYidAmvvjDdnLTE3no0gu8DgG/uPxLakqLWLRsDRPHjsQYbiDjtHjMKSYsB5op+7yW+tQE4qc514Mq3F6NpbWViNlRnJSTytfri9j1dglNO5qwWR1EJ5qJPjqOpJJItj//ISX1zajoCDKm5pI9Kdu9BII329dvZX2plVe/LfPY3rZlHWnmFr54azUn/uBUn0ajxLAg+TUEeLu5o63SRlZOBjWrmntdOXvN259hKClg65exGCMMpM5KIjzFSPXmBhrKmtn1WiGZ01Ow7G+kpaGF2lILKE1MtpHGtmpsNiu6zkDjVgeV9TYi40xkHBtPTvUIXr75PJ+/j9WbdlJSbmXxt+Ue2216E4kmK4uWreHq809g2WcbWDrfuWjk1UdHM3/pBq6Zc2KXRpgYeL02mrTWe4EjB6AWEUB3zJ3FL5e8D3Nx3y1X/lENL/z8ih6vlVfWNrDssw08dXEyNy/bwLLNO5jyo1HuxlddegtGk4EtH+ynJrKZJKLZ8moBE2ZnuPdJzoml9cJUqldXc9adI2kotbFpaRlH54/hoWvOBKC1zcbmXQf44L0vWVNlwRQXzaijx5I7OZeYDkHx83/dDeCxIrnWmoW3/Jm/z3GuG9XSbPVpNEoMfZJfQ0O3z2O7YT7JGUnUVtS69/1u7VZqi6toKq1x3orfYuWz5Z9x8igj7360jsyZKagDDlqL7WSPSiDxvAi2vXeA9JwoDOk2otrM1Ox1cNKPx7mnLny7PoqtLxdy3MWpxKSb3BmWl5/bp+/jvYdvBTxH77XWzP/lozw1x5mxjS2tzMkzkOzKr+QYE3PyDO4GVftx0oAKjkG5Irjou/aG0SPvruaL0uIeR5Y66jxM/OCGUs7PyXC/Hx8dwfgp6ex6qYSGf+wjLiaKcIxMOivLvU+bzUZMaji7i1t461d7CTMZSA0Lp7TyUNCFmU3MnJTNzEnOa/d1Dc18vbOY91/4iAONLUSnJpB/yhRGj3cu9tnec/zirdUAHpfiXvxoLd8YdI+jUXKpTojgqzhQQU15rce22oPVVO8txdBpfb5Rkans/VcB9Q1NxMVGcdTEsVi/3oNxbymzsz2XExmRm8LUs6YBzktixxwfy52nxJP20F7yf5BORsqhCeQOu4PST6opXFFGfUMTdoOBk6/PdzeYACJSzThaYfVTxbTaHF4zrC86jt4DHhn74idfYTLoLqNRmWU7AdzHyeW64JBG0zBy7nGT+nQHRvsoU8dh4gfW1lK+10L6uEOh03SwmTCl+eKyaC5e2kRGXrbHpcC8UamU7qojPiKM/16bzs3Lmnj9r3f02FOKj4lk1tHjmHX0OAAOVNTy9trtLH/9v1jNRr7+bB1PX5LMLz78Lw4Nv77CuSz+ZcfE8N7uem586lceo0n1NRaP0Si5VCeEf3V8vFVzQzMF63Z43EFtrWsgtdP8xeQIMzM6NXjiYsM58Sr/LMrYOcMmp4ZRsrOalMRY9xykqoIGstMSKd2znw8vj+akJU0Yo40e5xlhj+S4yTlQV89Tc6J8yrDeanrq4mQWvLseh9a8fbkzT68+Opplux1ez11Z2+AckXKN+svluuCQRpPoVvsoU8dh4ovGRLLi1QKMV+S6L/NtfHEvZ6Urfr+6mYvyjXxZa2Xvu2UelwK3vFrARWMifZrY2HHoOik+msraBm5/eAkL772KW+Ojefjlj0jIhJXf1GOwWAADDdY4kmIOXYrrPJokE8OF6L+KkkqszVaqiio58F0BSkFzYwvJYSZMJmcDIynMxBEjnaMzRgW/ufxUzCZjT6cNuM4Z9n/HjWDBRxXsiYpg3NQ09w0wsY2Ki/KN/H51M2elKz5/cQ9h141359fed8s4Pi2LCSkNvWaYt/zq+Lrj6P2pWTV8W2YnOcb5c+t4Ka7zuWVyeGiQRpPoVneTFjPDomhZ2cQXpcWkjojBXNnEEUeH8XlBG0ekGSkvK+WeG+bx+srNfFFazMjkeBKbNH++1BkMvU1s7Dh0fecVsz1eX33+CSz/fCNL5yeSHGNi3VgbFy22cP4TxbQ6nM8diokJI6n8O3ejyNdlCoQYzqrLami
ztrHzf9/TXNdEY52F5PBwUJARYSYnOZZJsVGc56dRoIHgLcMimgzsfrmEspgactMTuf30k3nw2TeISTVR0+zglBwz/9vURPX7dWyrcU5l+PWcs3h66Qr+7MPk7N7yq+PI1/l58PLXzUx7rNQ98gXOS3EdG0TeRv1lcnhwSKNpAHVckTs3PZE75s7y64JlnXs0vuzT0zHtkxa92VFYxjk/e5TI6Ggun2Lm8wIbT50fyc3LW7go38jy/37NK/ddw40PvMy0cSOJTmnwaWJj54nnc06e5vG60eo5SfLY8en83+mRkHUMP7/8LMqqLSz+/Du2lNbyyUv/YfqcY7tdpkBGm8Rw5HA4qK2so6KgjOLv99NYXsOIiHBSwwxkJcZy0/gs8kclY1DKPYoErvx66z/c8c+lIZNfvR3XXYa159cTf1jALx9/nYvyjR4ZdvkUM9tsBlLTkll4z1UsWrbG6+TszksF9Ce/bj25DrKO6XHUyNuof3cjUiKwpNE0QDqvyF1V0OC8mw38Fjydezi+7OPLMd7c8+QbJJqa2bS9kM1Kc9kUMy12zZRUA4s2txIdVeQ+95tFZT5PbOw8BH33E693miS5CZNyeD3XnVfMJj0pjjt/cAIAX+8u4bWX/8Mn7/+XlW1WXtrc7NGbi6vYKo0mMeS1NFkp3XeQgi17aSyuwIRicmoc+Ymx3HHWkURFhPV6GS1U88vX4zprz69fPv46X+8o4oum1i4ZZjQWMCk9gkXL1vi0VIA/8qs73Y7693Kc8D9pNA2Qzityp4yNg7nO7f4Inc49HG/Dtr31gnwd6t1RWMa32/fw1vxo5i5pJCslkfvPH0FyjIn7M2x812jhX7++gZv++Izr3N4nTXae2Nhej3voOj+Cf36xh6dudd5R55wkaeH1v97lU51H5WVyVF4mf77yNHYWVfDSf7eyx9JC9jHjmHLqVMxh8gwnMfTY2mzUlNew5T9fQ00DETYbR4xM5sfHjydlxNH9erxVKObXNXNORGvd5wzrmF8XL93Da3+5nXsffYX7z491Z9iW+nocWvPURTHcvGxDz/k1JzD51VFPo/5iYMnD4QbIvtJqknI8588k5cSwr7TaL+f37OE4h20raxu45J5/UVXX6HUfz16QwX37a2/uefINLp9iYmq6mQlJiuNTm7sMG3s7t6/1tJ9r+bYGLp9ioqqugUueL0Ip1e25emIwGJiQncafrjyd5687k9lmxf8eeYsVT77L7k270FqemiEGN4fDwdY13/HBY2/z6cOv0/CDRsTUAAAgAElEQVThRu4/bjzP/vhMnrjhHG48dzppiXH9fh5oMPIL8Ph77m2f7o7rScf8unyKiZseeLHLpa9Ts1pJNrf0qZ5A5ZcILTLSNEA6r8gNzltdc9MTD/vc3U0SbLS2djsB8bIjo/jnk3t48PpRHsf01lNr76X98zpngDo0vLy5iZX7SzGbnIHscGgqaqt45uejfa7n6qOj+cc/9lE4IprF31oBOFBpwYCDp7+qICtWMePxYhJjIw9rvZLI8DDOOCqPM47Ko6qukWUbd7H8w41Ejkzm5PmnEhYR5vO5hAgmh8PBt59/S9HXuwlramHOUWP5v3knEBcd6ffPCkZ+XTPnRPelt3++8Smfrtvisc8lr3a9Xb+3DOucXzfPjOCpDVUsssW7c8fh0FTUWJiSFtanegYiv0TwGX/3u9/5/6xF6wNw0sEtOSaapW9uIjzVTGS8mcp9Fva+W8Zv5p3NuJEpvZ+gB/96czXjzaWcMc65BkpUmIHCyhbeWruPF+Yn849VhVTWtzAlssK9T2NjIy1WK/vrFcfnRBEVZqDK0sqWUhvHTx3b7Wfd8OdFnJlu4cL8cACuOjKCequDttgsPnnqlyy4eBaNLW0cn1jLkRlhXPXKAS6cEkdFvdVrPR33iQwzMfWYY3n+tz9hwcWz+MWVZ3PNnJP4bMO3PP/DZNYcMPDeI3cx95Rp/OnZt3hqbjz/WFXInFOOIaofjZ2oiDCmjUnn4pnjyQs38t6Sz1j/2Tc4TAZSRqagOi2uJ7o3JXXa74Ndg798V7Hld8GuoSfbN+7gy1c+pfjzbzgvYwS3njmNeSdMYuLoFMIDdMl5oPOrytLKmn1NrPxyE0/NjecP7+7kvHFmzp0Q7d5nX1kdDc1tXHfsCI/jesqwzvkVG2agodVLfiXV8/uzk9zZ1Gxt81rPjJHhQcsv4UcxaZCQ41OGqYBcmvjycbne4UWg7p678K4nKCmv9NhWbWnmknzFX38wmr9/XseLW1oxGQ79WsprLNjtDhwYyEqOdW/PTE3u8fp5ztx7aGu1dtluDgun4N0HPOqptjQzwtRKrc0ZCN7q6bhPYmxkl8//+ysr4cBX3HlKPH//3HmXCdBlm796a5bGFv7z9W5e37SXxCnZTDl5KiOS43s/cJi7dPI1Q6aFuWTropDLr5amFta+vYamglJOyx/JVadN9bizbSAMZH4B2ByKq48M485T4vm/t/fz5g5NYuyhUbTyGgttdjzyC3rOsP7mV2JspNd6gJDKL9FP6VNhzKk+ZZg0moag9gmKS+c7JzZWNtiYv9TS7xVsD6eGp+ZEseDdRvcwesd63JPFu1lh19v3ccmr9V7PFYjv7eudRbz0xTbKjEamnj2d7PxRfj3/UCKNpsAo3nOAda99RkZEGDefMZWJOenBLmlABDvDOubXzcua3FnlLYuevSgmJPNL9EEfGk0yEXwI6mlNj84TEHcUlpF70X3sKirv9nz9mbToueqtc1KlL5PFe/s+ujuXr5PY++Ko8aP4+3WzeXjODGI3buet//cyW9d8JxPHRcAVbC/kzT++guPzb3jhurN47Cezh02DCbrPsCdf/7RLFgUiw3qb5N3TZPGevoeBzC8RGDIRfAjqaU0P8JyA2HG9krcf+qnX8/V1HZTOEzu9rXrb3WTxjpM4vX0f5TWNtNlh+pMDt15JelIct8w5lpu15uVPtvD2H19hzEmTGT89n6hOz9IS4nDs3rKbb95fx8yRSbxy07mEmYdnRHe7LpLjKxLNrR5Z5O8M8zYx/eknCthXHOnTZPFQyy/hX8Pzb+QQ1931/MraBi656xF+dYKRP326nhOOHM/m73eTl6DY/P1udhWVM25UqscKu/1ZB6VzD8vbqrft1/p7WuE21NYmUUpx1RnTuOqMaazYuJP3/vU+TXExnHLZLKLjZGhd9F/h1gI2vfslJ45K5tVbzsdoHN4XAbz93W/Pr3uOdebXNXNOpLK2oUuGJcRG9fiUgd4yzNsI0Y0nJHrNrztPiXfvE+r5JfxDGk3DyIvLv+TUrFayY+2cmtnKzQ++yIQkhdWmmZCk3D21jr0yoM8PifRl9drBvsLtudPHc+708ewpruSRZ1ZQGxnGqVecTky8PMtO+K6+xsLqF1YyKS6KxTedN+wbSz3pnF+Llq3hv1t2dcmwk48c1+NTBnrLsOGQX6L/fJ4IrpQyAhuBA1rrnp89IRPBQ057L+3Bk5qZlmHi3W1WfvFRE7kJRp65IJIb3m+myAJLHvwZ9z76So8TuGXSYl
f7S6v52/KN1ISZOemy04hPiuv9oCEk1CeC9yW/BmIiuNaaLZ9spnL9dh66chaJMlLZo875tfmgjds+MVNZU8eoWDwyLC1pBIsuiet2ArdkmOgiQBPBfwZs619FIhg6TpBs76XlJBiJMBl4cUsrk1KMzM03kZ9s5KIJztW9b3rgRWaNgvuWlzMjtaXbSYudJ1YO9xVuR6cn8thPZvPQuUez9YWP+OCJd6mtrAt2WeKQkMmvyoNVvPXAEo5sbOLZW86XBlMP2jPs74tXeeRXToKRlsZ6xifCRRPM7gwbnwhNDRbuW17OaaPxOoG7uwnlwz3DhG98ujynlBoJnA/8CbgzoBUJv+k4QbLFauO7XRaWfW/AYIDdlTbiwhX3nmRkS5mNozMMPLrWTn1rFSovgZrGVkbFGtlSYvOYwA3dP2hXVriFjOR4Hrv+bCpqLNz37w8xjkrh5PmnYhzgNXXEIaGSX1prvvpgPdadRSy88jQS4uQmgt60Z9grH67DYbO688vhgP01NuqaFfeeZHBmWLqBR9Y6aLbZSY7UYG/j+52eE7jbeZtQLhkmfOHrnKZHgF8Csb3tKAKj4+Tsvj5U9+Kle/jhOSdxVlYTt53w/9u78/goq7P/458zM9kXQkgIgbAFSNgJO4IK4gIKAtYVi4hWcSmtWH2sy6+1T621T1ssKLggVgQEF2rFslgVFXdW2QQFZU1YspOELLOd3x8TMHsmyUzuWa736+XLzOSezJUEvlz3uc99TgxHT+Xx7wNQXHKW3ilxdKi8lHRr5im2Z9n58EAJj10UypOf2bhtZDuiug+vFiL1bbTb1I1/A1li2xheuvtKtn2fyTN/WUV41yQuvF62aTGI4fllLbey9unVzBjWiym3B98/yE3NL6ixMfiqs9w2qh2PjY/j6Kk8uiW34/plJxmQZGbcQNdSDKfyipiZ4WT1tzYevjCUJz8rYcawtvXn1+TYFm38K4JTo02TUmoykK213q6UGtfAcbOB2QAvPnQjs6eO8ViRoulnQTU3pVz23y0kxIbzwtdnKCsrp9yhCDXD0t05tG9bDrhuhy2rcHDzAAspMZCRBK9sK6J/QfXJjTUnVlZfb8m9yeLBYlh6CsvSU9h76AR//MsqeozLYNC4QUaXFTSak193PD6LS6+v99AmO7r3CLvf2sT8W8bTMTE4V5dvzihO1Qyblm7mpa8LWLmzhLKyciIiyigus7E9C9Yfdk3Gzi4oxmZz0CaMJuVXc294EcHJnTlNY4ApSqkjwOvAeKXUipoHaa0Xa62Haa2HScPkWVVvmV27aWuj19zPnaHdMyIccG1KadFWFj08i5SEGD68pysDuiWw+/U/cWztPLYt/wPblv+BHSv+l75dEpg+IIzUdqFMHxBG54QYXvn9bbVqmTnEdRY2KT2cPd/9yM0ZrksNM4dEuVVjsOmf2pE35k5jcFEJbz+xgqxDJ4wuKVg0Ob881TBprfnsjU8o+GQnK+6fFrQNU1PzC2pn2EMXRxJh0STERvLhPV1JSYhh7+t/rJVfaSkJLL0myu38mjkkirc3bmbNx1uqPScZJurTaNOktX5Ea52ite4G3AR8pLWe4fXKxHk1V6dtbPXYc2doCZEmfsizkhhp4ub+Fu7+y7JGV+CuOdny3K29NWs5N7Fy3f4Sbu5vAVsZICvcNkQpxQ1jB7DkF5fj2LSLdc++Q3FhidFlBTSj8ktrzfuL13Fdx7b85ZbxQb35c1PzC+rOsGvSzbRRJR7Nr4RoC4kh5YztaJVVuoVbZJ0mH1fX6rQ1V56t6Zvvj7PFauOl7YWYlMapFSalsDnymXlrar1f54Mt+6tNFnc6IafUSf8z++tdnyQrtxgTzmqX+UDWK2lIbFQED/1sDIeycpn30jpMPToyaspomSweIApzz/DRonf59eUZXNivq9HlGKo5+QV1Zxho0hPM9X6d5uQXQFZuebXLfOdIhom6yIa9Pq7myrOAWztj5xaWMO03/yBKn6VURXHFmMFE5e1p8Os0971Ey2z85gde/HgPg669iB4DU40up1l8fZ2mpmjJOk352QV8+eI6XrxzAlERYZ4syy+1JFOqZlhWiYkbB0Xxu8va1vt1JL9EszVhnSYZafJxzV15dtm6L0kMKefMWRsJUeX866PtWExaVrn1QZcO7snYAd1Z8J+veefDHUy8+2rCI+UfXH+TdzKPzS9v4KW7JhIRJndJQssypWqGJYfDK9scrPneVu/XkfwSrUFGmgLQuTM0c8UZFl8dyez/lOIMa8M7//iN3Ebr407mnuGBFR/Ta+Jweo/o7TdzYYJ9pCknK4cdSz/ghdkTCAsN8UZZQUUyTLQqL60ILvzEuTO0qemulXKnpoeQEFJe50rewrckJ7Thtfum0vHwSd75+5sU5RcZXZJoxKljp9m57ENevGuiNEweUl+GPbe69kreQrQmaZoC0H8372fXiTJGpij25dgYmaLYdaKM9zfvr7UZr/A9SinuunIYC6+/kC0vrmPXxzvxyoiwaLGTh0/x7cqPeWH2REJDZLaDp9SXYW98uE3ySxhKmiYf4OnRnwkj+zDnokTG9E2hb/eOjOmb4no8qEet9VJk5Ml3JbaNYcm9k+hfVMJ/nl6Nw+4wuiRRhc1qY9Mr7/H8nRMICeI7H72RIXVl2D1j2uGwW2ut9yQZJlqTnBr5AE/veVTfhEi73sHMgSG1VsKV/ZZ828/HDWRoajK//+Ny+k4eRe8RvY0uKehZK6ysm7eaZ34+DksQN0zgnT3b6sqworPlRJrttVbtlj3jRGuSpslgVVfL9dSeR+/Om1Pn+9zw0IJqq95OW7kZs0mxWPZb8nm9uySy8r6pLPlgB+/vPcK4m8fLPnYG+vi1jTxyeQZdOsQbXYqhvJFfUDvDzuVXzfWeZN9L0drk8pzBmrNabkvep66VcL393sIzLBYzd185nPszurHu729x+nh24y8SHrf7091cHBtBRlqK0aUYzsj8mtzTVGPfS8kw4X3SNBmorn2QvLXn0Sc7DrByTwXDFmUzbFE2Gc+cYteJMib1xOvvLTxrQI+OPH/reI6s3sSPO380upygUl5aztGN33DrpRlGl2I4I/Nr2KJslu0sY9+BI7JnnGhV0jS1sqqTFs+dPQFc+8pxlFJeO1t6d96c8xtbblv+B2ZOupA5FyUyMq0DIPst+Zv42Cievu0KQnb9wAdL1svdda3kvUXvMu/WSzGZgjM6fSW/zmXYXaPjZc840apkTlMrqzpp8dxkx4VfFhJnsTL82UziYyJaZQVbWT3X/ymleGjaBXyx7xh/f2IFE+69mriEOKPLCljbNmzh+n5d6NAu1uhSDOMr+QWSYcIY0jS1opqTJt/621y01tzw0AKenxzJPWtLeetvc1tlImNdk8WFfxrTtwsZ3ZO4+4V1XHDnVcQntW38RaJJbFYbmV/v52f3TzO6FMP4Un6BZJgwRnCOMRukrkmTrTWRUgS2qIgwFs+ewL4VH7Ljva1GlxNwtq79ml9NHOo329p4g+SXENI0tZq6Jk2+vXEzaz7eIhMZhUdEhIWy8
I4J9CouZcvar40uJ2BYy63k7D7EmL5djC7FMJJfQrhI09RKGrrlXyYyCk+684rBDLTZ+Ortz3E6nUaX4/c2v/sVD1w13OgyDCX5JYSLzGlqJXVNWszKLWd7Fqw/LBMZhWfNGj+IhK0HWPbnVUx64DrCIsKMLslvFR3IZNiEwUaXYSjJLyFcGm2alFLhwKdAWOXxq7XWj3u7sEAjkxZFa5s8PI2h3ZO4b95qrvrNtYRHhhtdUqtraX79uOcQY3sme6s8vyH5JYSLO5fnKoDxWutBQAYwUSk1yrtlBTfZgFJ4SnJCG579+VjWz1tN2dkyo8sxQovy6+Bne5l2gez11xSSXyKQNdo0aZeSyochlf/JSnpeVHUtFCFaKik+ludmjue9p//F2aLg+oespfnlzC8iIS7aK7UFKskvEcjcmgiulDIrpXYC2cAHWuvN3i0reFVdC0XuRBGekhAXzQuzLuOD+f+m5ExJ4y8IIM3NL7vNTrvoCO8WF2Akv0Sgc6tp0lo7tNYZQAowQinVv+YxSqnZSqltSqlti9fIGUZzybonwlvaxkby4i8uZ+Mz71CUX2R0Oa2mqfm18a1PANjz+R6mZXRv3WL9nOSXCHRNWnJAa10IfAJMrONzi7XWw7TWw2ZPHeOh8oJLa26AKYJTm+gIFv/iCjYtepfC3EKjy2lV7ubXpdePcz3p1LSJCr7J880l+SWCQaNNk1IqUSkVV/lxBHAZ8J23CwtGda2FImdrwtNiosJZfOcEPnzmnYBfx6kl+XX6+0w6JrbxZnkBRfJLBAN31mlKBl5VSplxNVlvaq3Xeres4CQbUIrWEhURxiNTR/H0gn8z6dfXYDIH7Dq3zc6vUA3t28Z4tbhAIvkl/I3Wmv9u/4FjYcXMvmusW69ptGnSWu8Ggntlt1Yia6GI1jS0Z0ceMin+b/6/mDz32oBsnCS/Wo/kl/An2QXF/L9VnxLZtyszrhvv9utkRXAhgtig1GQetDt59tX/MuH2K40ux6fYzCZO5xeRFB9rdClCCA9xOJw8veYrduQWc8ndk4ltG9OkjbgD79RSCNEkQ9I6MTI6gm3rtxhdik9p36sjJ3OD5y5DIQLdD5k53LRgDbbhvZn6PzcQ24zL7zLSJITg7glD+PObn3J43xG69+1mdDk+Q8s6vkL4vYPHc5i/YTvF0RFM/X8/xxLS/NZHmiYhBAD/87Mx3PvSfwmPCCe5ewejyzFcl75deWfNlwxO62x0KUKIZig6W8bCDdv5vtzORbMnEd0mqsVfUy7PCSEACLGYef7OCXy29D1sVpvR5RiuXXI7DsvlOSH8jtaaFR/u5LalG4m4bAhX/nKKRxomkJEmIUQVFouZp6eP47Hn/sPVc39mdDmGOrT3MEdP5+NwODF74c7CCquNHQcyPf51Pa1DfCzdO7YzugwhGmW3O1j+8S7e259Jj3EDuf7aiz3+HtI0CSGq6ZYcz6TUJLas+ZIRU0cbXU6rO7DzB1b99S3CwmJJ6TGApe/v5BdXDqn3+NJyK98fOw1AcamVd7dnYjK57sZxOjUnSzQRsbUXybRa7cT1Ho0lNMw734iHnNn7LarsmNFlNEl5UQHJMeYm3RUFEGqGa0Z0O//7qyqpbawsduqjKqw2Xv1oN+9/d5x+Vw7nmuvGee29pGkSQtRy89gBfLNsIycPnSA5taPR5bSqd5/7iBvmziOlRx8APl04l+mXWDl6Kp/swrOs23GcsgobJSqakLAwyq0O4tJGYFImtFKkT7+LkCqNULpR34inDHdv0b9AUFyYx/K9W+v83JnN+wmx/1jtOVtRHu1jQwFQaK6/IJXIcNfjHp0SCG3BhGPRuNJyK69s3MlHB08weNporrvxEq+/p9LaC3eHfPms3HIihJ8rLbcy88UNTHtkeqN3m9zU79amndL7sJc+PVQtv777fC0532+lTWoGJksofUdPxGQ2G1We8FEVZaUc2PwBGo3T4aTo0DdEhYdQXlRI13ZhaK2ZOqIbHeJj6JQYZ3S5fu1sWQUvvr+Dr47nMvSaMXRr4R2/veJ7MzR5pFsZJk2TEKJem/Yc5o3j+Vw8veEzuEBumoRoCafTicNuw2G3890X6yktyCa09DR2m41O0ZoxfZIZ1L09iXHRXpk7F0i+P5rNPz/ZzdFyG0OnjqZzumfubG1K0xQwY4dlFVYqrHaPfk2zyUSM7HIugtjYAd35cO8xMr8/ToqHAkqIYGIymTCFhhESGkbG5ddX+1xpSTH/3fU5//rkBOWndhFjttOnYwzXjUkjKb5pK1UHKrvdwd4jp3l+4y5s8TGMvvUKBreJNqwerzRNdrsDu6P27ulZuWf4aPfxel+ntWbH0UJCwpv+A8kv00S2S27y6xpSVnKGGGcxFkvTfkzOirMM6tKmwT/wE4d0IzGu9vdpMim5Di58yv/eeBE3LVxLyu9mGF2KEAElMjqGPmN+2r7IWlFOQc5JHt7wH3TBdvq0D2NU746MHdg16Booq83Oyk17WPftMZL6duOCOdMIjzT+pgmvXJ6bPv1GHdE2qfabhUbSY9RElKp/CLJNfCIhYcb/YFqioryM4oK8ej/vdNj58av14LDW+lxpUQEJoQ7M5up/QSKVlX4p1a+DD+ieyKAenm0UhajL2i0H2FhqZWQ9d9PJ5TkhPK8g+ySZ326mYN/npLcP5foL00jvnGh0WV71xb5jvLPtIMfLbfQeO5D04elebxgNn9MkoeN5xYV5lJwprPbciT1fcDbvxPnH1pICkmNDzj+2W8u5MC0BS+V18n5dE+nTtX3rFCwCzi0L1nDpgzfUebYnTZMQ3pWffYL9H7+NyjvEZf0SuXn8gIAZfSqvsPHMui3sPllAQv/u9L2wH7GtuFG2NE0CALvNyskjP5x/fOrbL7EW5QBQXlZKQqiV0MpLjwkRikHd2gIwpGcyyQmyHomo7nR+Eb9+6wuueejGWp+TpkmI1qG15vttn5D11X+4vE9bbpuQgcnkfxPItda8uWkvG/YdxRoeRsakEXQxaMuioJwILmqzhITSuVff84+rflxT7slMPs/PQWvN6g2bwHYEAHuxax0SraFPhwh6dYglJjKMob27eLt84WOS4mMZEBfFyUMnSU6Vy8JCGEEpRe/hl9B7+CUc2PUVN89/i3Fpcdw+YZDPz4e12R18suMgb+86Qr7dSeoFfZjw8HS/GjFrdKRJKdUZWAZ0AJzAYq31goZeI2dqgenYgW8pO1tMSfZRbKcPAoqK4nw6xIbgcDiYMKADcVFhDOjRkagI/56XJupWfLacO5Z/zDW/rT7a5KsjTZJfIhgc3vcNh957mceuG8KA1NrziY2280Amb2w+wMH8YnqOHUjfUX0IDQs1uqzzPD3SZAce0FrvUErFANuVUh9orfe1qErhd7qk9av8aFStzznsdt7/5gucBTaeW/4lYSEm11YGsRaiQs1MGd6FsBALfbvLCIU/i4kKZ3BCDIf3HKL7gFSjy3GH5JcIeN37DqZr+gL+8daz9N15lAd/NsLQepxOJ9u/P87Kr77nZLmNhPQUBkwfz6B2rTdP
[... base64-encoded image/png data omitted ...]\n", "text/plain": [ "
    " ] @@ -304,11 +292,11 @@ "from sklearn.svm import SVC\n", "\n", "# Initializing Classifiers\n", - "clf1 = LogisticRegression(random_state=1)\n", + "clf1 = LogisticRegression(random_state=1, solver='lbfgs')\n", "clf2 = RandomForestClassifier(n_estimators=100, \n", " random_state=1)\n", "clf3 = GaussianNB()\n", - "clf4 = SVC()" + "clf4 = SVC(gamma='auto')" ] }, { @@ -351,19 +339,9 @@ "execution_count": 10, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n", - " FutureWarning)\n", - "/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n", - " \"avoid this warning.\", FutureWarning)\n" - ] - }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAlUAAAHiCAYAAADBITniAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzsnXeYU1X6gN+TMjXTh6EOvSqKgKKrWMAKgqj8BBFFLAuLgLq6a1lXd3XXtaMI6FoWFRUEaSqIgApYUKrSe29TMj3T0s7vjxSSmcxMMslMppz3eeIzk3vvyUkk73zn3O98R0gpUSgUCoVCoVAEhybcHVAoFAqFQqFoCqigSqFQKBQKhSIEqKBKoVAoFAqFIgSooEqhUCgUCoUiBKigSqFQKBQKhSIEqKBKoVAoFAqFIgSooKqJI4QYK4RYVctrdwkhrgpxlxo8QogVQoi7w90PhUIRGoQQVwkhToa7H4qmjwqqGhBCiKNCiGtC2aaU8lMp5XV+vPaHQoh/V7j2XCnl2kBeTwjRUQghhRAm5+OoEOKJALsdVqSUQ6SUH4W7HwpFU8bphlKnJzKcDjKEu1/B4vRfsYcD8+v59VUAGUZUUKWoKxKllAbg/4CnhRDXhvoFhBC6ULepUCjqleFOT1wA9AWeDHN/QkUfKaXB+UgM9GLltsaLCqoaCUKIPwohDgohcoUQXwoh2ngcu04IsU8IUSCEeEsIsU4Icb/z2HghxE/On4UQ4nUhRJbz3O1CiN5CiAnAWOAx58jqK+f57pkzIYRWCPE3IcQhIUSREGKLECK9pn5LKTcDu3BI09XfNkKIRUKIbCHEESHEgx7HooUQHwkh8oQQe4QQj3mOupx9elwIsR0oFkLoamhvgBBisxCiUAiRKYSY5nw+SgjxiRAiRwiRL4TYJIRo6Ty21uPz0wgh/i6EOOb83OYIIRKcx1yzcncLIY4LIYxCiKcC/p+rUDRzpJQZwEq8PXGjEOI353f3hBDinx7Hqv3uOT3yodMju4GLPF9PCNHL+T3PF440h5s8jn3o9OgKpw9/FkK0EkK84WxvrxCib23eZw0el0KIyUKIA8AB53M9hRCrnefvE0KM8jh/qBBit9PHp4QQfxFCxAIrgDbi7ExZm0odUdQdUkr1aCAP4ChwjY/nBwNGoB8QCcwAfnAeSwUKgVsBHfAQYAHudx4fD/zk/Pl6YAuQCAigF9DaeexD4N9V9Qf4K7AD6OG8tg+Q4qOvHQEJ6Jy/XwKUALc4f9c4+/AMEAF0Bg4D1zuPvwisA5KAdsB24GSFPv0OpAPRfrT3C3CX82cDcInz54nAV0AMoAX6A/HOY2s9Pr97gYPOdg3AYuDjCu/1PWdf+gDlQK9w/1tSD/Vo6I8Kfmnn9Mt0j+NXAec5v+PnA5nAzc5j1X73nB75EUh2umKnyyOA3vmd/pvTGYOBIqCH8/iHOHzbH4gCvgeOAOOcrvg3sKaa9yWBrj6er9LjHtetdvY5GogFTgD34HB7P+f15zrPPwNc7vw5Cejn8bmd9Of/gXqE/qFmqhoHY4HZUsqtUspyHFPkfxBCdASGAruklIullFbgTSCjinYsQBzQExBSyj1SyjN+9uF+4O9Syn3SwTYpZU415xuFEKU4gpq3gKXO5y8CWkgpn5NSmqWUh3GI8Xbn8VHAf6SUeVLKk873U5E3pZQnpJSlfrRnAboKIVKllCYp5a8ez6fgkJ9NSrlFSlno47XGAtOklIellCYcn/3twnt6/lkpZamUchuwDYfgFQpFzSwVQhThCB6ygH+4Dkgp10opd0gp7VLK7cA84MoK11f13RsFPC+lzJVSnsDbI5fgGCC96HTG98AyYIzHOUucTigDlgBlUso5UkobMB/Hrcrq2OqcBcsXQrheuzqPu3jB2edSYBhwVEr5gZTSKqXcCizCkVIBDoedI4SId/pyaw19UtQDKqhqHLQBjrl+cf5xzwHaOo+d8DgmAZ9Jik55zARmAZlCiHeFEPF+9iEdOBRAn1NxiOsvOEZOeufzHXBMTbuEk49jxNjSedzr/VT42ddzNbV3H9Ad2Ou8xTfM+fzHOG43fCaEOC2EeFkIoacyXp+982edR/vgHcSWON+3QqGomZullHE4HNEThzcAEEJcLIRY47ytXwD8yfO4k6q+exU94vkdbgOckFLaKxxv6/F7psfPpT5+r+k73k9Kmeh8uNIRqvO4i4puu7iC28YCrZzHR+IYVB8TjpSPP9TQJ0U9oIKqxsFpHF8wAJz3zVOAUzimgNt5HBOev1dESvmmlLI/cC6OYOOvrkM19OEE0CWQTjtngF4DyoAHPNo54iGcRCllnJRyqPO41/vBEcxVarpCv6psT0p5QEo5BkgDXgIWCiFipZQWKeWzUspzgEtxjArH+Xgtr88eaA9Y8ZasQqEIAinlOhy33V7
[... remainder of the base64-encoded image output omitted ...]
Zr/T85rcIguV8lqU8D0snFetR8HUMEN0IolDXqB95+4+j5/KoMdf5qvEVFBrJE5rR1UU1s58nas7iDz29MBT+t7B6v4bGYx1WhvNvQNqLv4E0bstOLfIwp09RWKN0DlOIKvITmKEwBdL15GeEMnHAQip+zNuz3WwYGcFLw3NJGOzmU3L53H9yLEN7tFiLmbPTwtJDNcY1sVJYno3nrhZZXVpKfe/MY+o2HheenAIPf0EQdveHNnqHXj1RiPXPfwyh3dtwlT8LTZLCWHR8XW+vq65hn1OsZFoiMA5U/ULWreGvfOLiTt7iqiqyg1ddMzdXkaHGIGZS9fTrV1co/QrEDf1uvb31Y+/kRiucUsXBVlR+fOAdmwpsbDg7ec875s5aiK97/4Lxw4dIPGiWzzXtwUNawz+Ml+uAPDUGok2h2O/7+Tg8o/561396Nvl1JedhI7/6iAYd0DNxZ8gutuC9x/LZ/DTb/LIRUY0TaPCqZEQBn+7xsizK2xcnCzRMyWhQSH9ect+Nu8vRdAZiImO5NdPXXdeUXnrAgqqNi2fRwdDGdd20dM5XqS8tJjEhGSGdoeNy76sd43YxKRaBZjuc//6iiK976bMhXloVe7Dmlq/YV5z6HruxRhik/l9zTIyzulHYufefl/nr+4qx1SG+ZO/NcuhPTTCJkRjac0a5q1f8WEwvK+Bxb87+fTWcG79soKLEisC0q9A3dTr21+y3sbVnfV0jpcwma2kJcYEHJg1V8O89UtVFQq+eYsiSXdaHNG99+5NiamAPdPHN3s/LT3GpqzYxKbFH3Jxoo1Zz9+EXic1fFELEAqqqqh57t/cO6Bg7Kc+QfzL1IXc21dHuE7gky0OkiIEruyoo3eyyKCuErLTwaEThdx7QRz3LvQvpFkni4hpl8mTT04irbOrdspiLubLt//EyAn/aXCPFnMx+1cvJl528uVuB7N3OjBVWIiMq0QUxYADM2+8z/33TB+PYqsAoMR02CNS3i7ETXUUbkrRpqTT027gExRsXUZJzlK6XTm41mv81V2JxwvInv1SraCoMQFRyDYhRH34K0ZvzRrmrV9mm8orP9m471w9mfEit/fSIzucOGWVq9Jt/OnnjXUGgv68p9w/i6LS8oAGMRudMnN2y8za6aCwwkJyfCWiKAQcmNXErWHe+gXVGuatMadSvxqzd2/ysrPImf1irYCoscFQS9om7Fi1ADVrHe/ecwnpSbEt9j6BcNYGVf6CKO9z/+beATV3Xxf06FCnII4efAWb9xzhWLjGt/tlSioU0qMF7u2rRwMe6qfn6eUOTGV2osIr/Qrp/uxC/vb1fq4Z+y56Y/U/7puWz0OXv6tWlsk72HKbeG5aPo+h3WHc1dVjaFxF6bc3GEzVbNN137GpDjvFr7uKzTVVQZB0SJFxSNHJni97MGoMmlK0GRUVzZFPnwXAWWnlxJpFJMTFkBoXUec1AL07pqC2Mt+rEG0fbw3zV4x+OjSs5p78adi0hT979GvZIYUKh0q5Q+WTW8MotWs82k/P40ts7D9ZSqxOpn87rVGBoL+fRX1B5wvXdPJcO3F1ab2dhm5q+ky59UsURGSHjeLX7/HRL8CjYadTv5qSKUrLyKSiFfpeARzft4WDP8xl9JUZDLr5xlreiaeDszao8v7ijR58Ra1z/0DnSbXUvhadKEAnqH4FEeD5/om8cE0sE38pYs3BUi7vIJAWreOoWUEUNC5IFRixqAJFqyQ5PoYOXkL64fLtbDgpcu2j/0TSVf8VcGeept7enqeWLOaSW0b4BFA1g62D29axrcDGvJ0nfPZYV4bKLUQAiiyjaWC1yUhhEeiik2j34Hscn/4U7R6aAoDqdCCX5mFI6sjJTxv+8/BnmGcuzEMQdbVS8YEWmHpT806r3FLKxhmvMvnhyxu9VogQzcWtFXXNxTsdGuatq3UFdbK21aNfALd/ms05CU7ax+g5alYQBLioncS1HxfQPlZHcaVK39J9AddI+SvMD0bQqakqJcUmbEcPosgygqjzaJiqaXR+eiZ203Hyv36Tdg9N8dEvoEENq8vw01JcWEu/mnIkdyaNmTEXFbDj6484L1Fm1tPXoDtNR33+OCuDqppfvHKb47QWc/rfVwUL3h7vN3196/gpHjHIMVlwOGU2ZMO7/3MgiiKaqiEIGhd1COfaHrE+d17vLt5EduKlXPXAIMA3A+XOPHVLCWdo93JPAFVXsPX4W7MC+lzuOyR3GzGALDvRJ7Sn3X2vB222lT/DvLo6Bf1ZLwRCzbS7qip0WvUr3donsX36s01aM0SIxuKtFffM2cCdfSNamX5t8in09ubW8VOYs8vkCWaycivYcBQ+3VbqcbnWVA2dJLDjT12rskf+axjrKtvw/ln4u2lubNCZFG1k26cvU2ZXqdi/AVl2Ikg6j4YdnTy60T+vmtRl+OlPw3KOHqRoia+ZcKDUd3TY2nHa7Wz+bgZhJQeZfN9lxMfUf0pwOjgrgyrvL96gzAo+/X49ax5PA05PMae/fdUnjvWJgXtu1vzh0SRF6TBZZc/nmb5qN6a0q+h9WfWa7gzUmkWfcnTTCv52j+vucWS/WO6d5wqg6gq2AsH9BS4tMqGqCg5LMQCaKiNIOnJmv8jpStjWrHkA1x3i6+NG1nlX5y/trtgr2TfpARat/b3F9hoihDdurchM0HNDhhMUV/t/W9cv8NUwqP8z+TtxqHldub15N82XjJ3KrmMmnJWVqDpDlYZpaKrqyUKdDhxlhZ7GHDcN6Re0Tb8qh93Gpq+noy8+xDOD+nBBt+tP95bq5KwLqmoWT97SFWZvtnvOYk91Maeb/cfy+WjhKn4d6/IDaao41lXHcP9rczn3jmfo8YfqqdzeGah7v/iS+y+KJbHKsDMxUs/Q7niCrZfvisKUc4x7zk9l1ELfo8H6cH+Bc44eRIxJRdAZAMj97FnSH3iP3M+fQ6pxDq7JDvevPL+v66y8OR0liq2Cdg++5/OY3XQc6w/vN3itN5IxnIiEVFYVJqJHCXXphWhRvDWsqNTKQxcaeHp5OU9epbR5/YLAC+xrnThUBU/e112bAYtWbuCHR1IDKnz3h8liJ3X4Pyk/+D/Czx+MGBaJpqnkzXwB1VFR6/WCIFRpWLV+uR+vSbM64gQpKPoVtP20ELvXLqdg8zL+dveF9Oo04LTtI1DOuqCq5hfWgJPrOotcPPkECdHVjqunqiDdnbb+y9SFDOkKOCsBPUlROgZlwoCn32XV5OcDFoCadQKappFb6iAmJY7bvQIqwCcDFSVUMnOjwvw9Dp/XOLTvuP98A+GyBbuzkjDZwtDugt9slb9idgBneSnWH6YSOehP6KITG/wMeoMrAJEddlAV1LJ81Moytr5+t6cY1LvGoCkdL6Ig4rQWe2oY3EhS08/mLxw8muJje3l7xLl0SgvMODVEiMbirWEHS2wIApyfio+GtVX9gsBrnWpmxmb+tLVWHWqxpZI7ewokRenIKyqrt/C9vnE+is1Kxe4fMfS8BjGs/s8iSTr0BqOPfgHIZSa2vHFPUPRLkiQ0TQmqfkHrqbvSNI0
dPy3GvOdX7ro0g9vGD2wVReiBcNYFVf6/sJH07ZoU8Bl7MGZpudPWUxf8zKZ9x9iyN4usCJi/N9/T0ltWbqPSZm/UXWfNz/Dxsq0cTuxPt4uu8Ql6NE1j/+rFnuO+BY/35t551Yad4AqS/vPwTczZamD6L8UkhAsUV2YTGZdEjJ9i9JrF7BZzMXZTNiUbF9FJOU7unpXoLhsJmoqGa6yMpsjYK8ygKBx9fxSaInNsSnV9giiIxCYmkdG5a620tbvwPfvLlxstUrGJrsHQNd2LAZo61U0URa597J/0G9mfiDADer3e53l3tqop8wNDhHBTW8N0gC5gDWvN+gUNlzc8/sYsv4OXlxyy1KpDHfTsJObvyWbl8TyKSy0khIt1Fr7XLGZ3v5eiKDgPriPaacK59wf0l98LmusfflSFirwsNFXm6PujkCQdmlqtYW79AtAZwrjwz766lJedRfbsFxs9rDgtI5McUQqqfrlpqOaqJawc3CiyzPYfFmI9uIFRV3bm5hdOfW1gcznrgqpgdMQ0d5bW/mP5TJyzioWjUxmzcD2xOieXdgxj8Zj2npbe0YOvYPiESXwwJLXJ4yVWbjnM/4pjuGyAayq3d9ADMLQ7tY77vDNQm5bPo3O8iJzcnXuTcxl3dRJv/ZDPogMC973keybvr5h90/J5ZETYsO5exh9viufprxYhdr0CfVwaqApyqSsDpY9NI3nwsxTMe4X2md39fmnd1OwgdHfgVOI6VlRsFT6eVubCPLa9OdIjbG6ioqLrfZ+mojcYEaMSoV1PMq8eTHhsdWbOfSwYmh8Yojk0V8Oaq19uF/JIQWHW8nUkGOQW0a/69u5v8LK/I8IBl/TGXlpAbEoqD1wYzgvXxPKPVSV8fcDi42Plr2vQ/V42q4j+2AZAhaxfcfa9GSk82pWFqizDtOQd9HHpKJZC2ndyDez1py2aKvvoF1R3QXvrF/j68lmKC/0eyYlVpqHBpqGaq5aoxyopyGXvqjk4TUd57Ibu9B/S9oIpN2ddUNVcgjFL6y9TF5IRpbDqdytG1cZLVxr5zwY7ReVK0IorD58o5LOt5Vw35kUs5mJm/fs55JIcPqoKerTwBLYV122H4A6S3h+WzugZG7n56l4A6FQHcZWFrPlqBoPGVPsu1SxmX/3VpxzbuILL2uuINNrZcaKcHnEyh9dORz/kZQRJhyGpI1JUAh0ecAUZqqb6fKG9BSj7y5dd72+3Y0hoj85gxOmwI+gMSFHxHjFq9+B72E3HPXdwxw/uJX/+KxQV5Pp8zuLcEySkd6j1PoDHe8bf3VcgNQeCIJDSfxQHf/mcvoNGIer0tV4fIsTpIBj65XYhN1udOGwKL10XHnT9qrln7+zUB3ckceNHrsHLc3b5Zn29jwjdn/X9YYncNuMwbz6SAVQPoZ+26Gf++tAQz2fy3q+3TUX/qcfpnqiQVaGQGVXJ3r2r0F16j8uDKiqBjDGTAJdhpzuY8qdhypwXcdjt6BPaI4BHw2rqF+DRsOxD+3DMe7mWfgmAJOlrvQfUr1/QeuqmNE0j++BeDvz0JT1inLw59HxSE3qd0j20BKGgqpE0d5bW/mP57Pr9MF+PiGTUojKG9pDIjBcY0kPH55vMvHBtok9xJfgv+qwvhZ9fXMZL83dy5SP/AFwBj5S7nbQYI91SUhnavZwD6f3r7eBb+9UMnOZcwuVkRvbV8cOeYuIvTmH9oRLeuyWCx1Z+ydV3jCEqNt4TgHl3Ds6bPo8bu0ew7aDMv64z8NJPJUwbFMYd87dzfNoDiKLEyU/HIejD6tyDoihoiGiqivcsY4c5D1mUkGKS67zW3dkny04EUUSqsnIQDBGkjfw32VNG+bV6AAhL6kDmI+/4FZ5AU9z6qHgSrhzB7z8t5JybztjRciHaGM3VL3eWSrLbee16I08trSAhnKDql789e2eneqYYeb5/YoMmndMW/oK5xIwoC9zbV8eSvVZGR0j8fKicSbeEM27lBp688zo0Tat1lHjjdJdNRWKERJxRw2yp4NaeEptyLIhb5nJi81LQNHRx6dhMJ+ocOOyw2xDEqpuqKg1zmvMQBAFdct2dg4VLJ1GkKciyExDQRbsy7W79chQcwbTglUbrF5z+uqkKSxlHdqwlb8sKbuydxAujz2uV1ghNJRRUNYJgdLi4xzP0TJK4oYuIiEaMAS5tJ/DgtyXM3OmkrMLuKa4E/6ntulL4qqoy/tM1XPX4fzCGR2AxF7Pvl4W8do2O19dWUFzu9LFL8NfBZzEXs23ll3Q0yizYamJwNx2jF+fz4Xozd/eEhDCBQZ0cnqNCd5bKfZQYFyYyKKOSPTkOhp0TzsY8laE9dZyTHs5956u8v1lGTspEc1SgOW1kz3i2au8KpUUmn71oqoo+KQMpyrVPXXx7BJ0OuSin3p+zu7PPYbchl+Z7Wp9zP6/2xPI3KiKYGOPT0KX34vjWX+jY79qgrx8iRGMwma1889NG3r7WiaxEN0m/3FmqKzrpSI6A+87Vs+KQkxsz9UHRL3979pdtamjvJrOVuSvWk2ZQmbW1lMHdddy/uIT311u4vQckhAsM6CTz+ZJ1AD5HibFhosemYubmUnolScSGadyYqad9nMjyA3bKyozIhmhQZQq+fh1B0qFYSzBbVOKS07x2IqBPqsqQiSK6+PaIegOOGgXmNVGdNjo8MtWlXyUnMaS4jhW99SsuOa1eL77WxvEDuzj461cki6Xc+ofODBjfdo/46iMUVDUCfx0ujZ2IvvfAUf54q57fCxyMPk/Pk8tsDOwO6XFGxl0dA+0v4petB/j5pIk/TPXf/VJfCn/68q2kXz0SY7gr8t+0fB43tS/ngnSJW3vqmb+pgCeube9TP1Wza2/tVzOIxsrUoVE8tbSSMTf05K5Li/luj4Unbu5EYqSex1OcnsCsprO6rdyKTnFil+1sz1YxiBrdEyWu6Gjjpq46vtheTr7NQrsxk1GdDkS9q57AUXCEwoWv1vnzEw0R5H3xAoIooZSbEcOjESQdoiGi6s+k9RHf91ryfv4cS8GJhl8cIkQLMnPpevq3dxCrUxo9ONjNit/2cfBkJc//wUiEDkaeq2fEwkqG9I4Iin752/OQbiJJBocn2/RCirHW2K6aWa9pC3/BqNn5cGgkY5dWMv7GDjx4SRlf7angpZtTSYrS8edkl4efMTyKwuLqwv+ychvIMr1SbFjsKttynEQb4MHz9VzWXmDa/xREuZT0R/8L4NEwpymbgvkv1/3DE0SXfkk6FGsxhuhEZNmJFB7TavWrOWiahik3mwNrvkUtOsr57cP57KGLCDOe2eUQoaAqQExmK5v3HCErTPPpcIG625f9uf0+fkUCV54Ty8HsApyyzCUddIz6ViEhGsBOu/wDDRai1pXCX7/nOL9ZUrlsoKsw3X0sN+5qjZIKlcvbC4xenM/MnTKSJHnqp9wF7Ku/+pScQ3vJP3aA2ztLoKpcmAo3TD4EwJCuCn9akMV/7s5EURQqC4/xw5wPajmrfzRhFNaC49gsFtqFl2OXNWQNnlxSyfQx53Br7yw+3ZnnultTFQ
TR/ddQoz7a3/c6lQXH0cWlUjjnz6QM+5OnliCvqubKbjre7LbixlKzW6a0yNU6LWiuu1ZN09j82zf06pSKKIohH6sQp4VVG/ex+6CFb/aIFFdWBDQ4uKaGDby0NwM7VJAeZ8ciywBc0kHHiK/sJESLzdIvf+/tPpYrKSlmYDdXtmnmTic6yVWk7R7b5W0A+vgbs9h/NJcBnUUUTfXYTSiqRpToRBAEckudjJlznGu6xhPTtbfP+7smVpg4aYccUxkJ4VDugHHL7Ex/qBu39jnCf7cr1dmmKg0TxPoLx6XIOJIGj0eflMHJT8dx3rgPPPVQ3voF/j2tWoqG9MuNu+aqvnosVVUpyM7i0LrvEEpz6Jsezms39aJdUuc2Y4nQXEJBVYDMXLq+et6en6GbdQ3srH9Ic+Naod3v4881eMTAS5m47Heuf+otz2vdx3I9uncFIBUYXeQ78Ni7a2/M3HlE6WREWeKJm3uTGKnniRQnq0tLUcPiWXL4AMl6KzdMPoTssJMWrrJ11SJuG/t/Pnt8/K1ZruL4F0fwx74i/7eijBcuNzB+hY2hH2WhN4ahE8rRx6XhLM7BrUWSPgxBU9n25khUTUVVFdBcYiTow7CZTiCIIs7iHJzWYipWvAu4REB1uvy1Ts550bWYBhW5hwE8hqMtRX3dMu5RE8f2bCHu4Nf88a7LWnQvIULUxYBLejOgfYVfDaurxqluDYPqfz4aZ+fgT7/qckt3H8slRaXQHRhnqr3v4RMm+YwcK849jqLASzdnkBSl46VkmR3zLRSWOdHLpVw8+QR2h0xymMIn/yvholLfoNL9OUxmK5eNfhVUkacv1fHfbU6GfpRFWGQUerHM1cUMXhqmIgoiluJCzxgsVVE8A5UFfZhLv0zZKBVmn+BEJwo47HZf/co73NB9ZlAIRL/qQ3Y6KDh+kDWz/oNkyeX8DlG8PbQ3KfE9WmK7rZ5QUBUAgQiBP4+TlhjSXJfb8ONvL6DfA//wGGdCYAOP3YFXl0QDgzIqsWs61h61ekbHuK0WdsVdgmgrYeqQSB5bVERxgYWPhsVw97wyjuzdTpdzLvB5jzWLZ9A/oZBD5dHcd4GdLnEiYy6KYNoOPe0e/Zgd7z1M3uwJaIqMrkZ3XGxiEpmPTCYvO4uTi/5N+uiJaJqGXJIDNYbauO+QjDc+gzGpIzmzX0R1VKBYizEtnYhLlQQEnZHEgU8hl5nInjwKTXF6xjmcim6YTn0uYsOuNWw7lMuF3dKDtm6IEIHQkIb5q3FqCQ0L1C0dAjMB9Tdy7JOhEYz7tsRnSsa1GfDVHidzR3fkwUWlZOeX8Ont0QxfUIHdqfrYK7g/+03PvEfPZD0l5SqjzzdgdcDn+/WkjplG3nsPY589AcBHwzRVJjo5zROkbJv0KGn3u7qbnUUnUN3u6l465q1hphUfePSraPn7aFXjhwRdmI9+gYahygfvVHfzaZqGuTCPYzvWUnZ0J9Gqhb6dEhg7rAcp8f5nNJ5NhIKqAGhICOryOGlql019nTH+hMbmVChVTdza41yf+qiax3Lez7l/7+7aqygt5qELDTzxnZXL2+u4YfIhoqKrv5RuZ/VuKeFcn1LMbkUk3qhyZy8dc994jv+b+YvP++z7cR6PXqPw+tpCZt0RjsUmc39fgS93Wjj8wWNoQNp9b3lai914G3+mZWRSFBVL4Zw/I8tO1EoLhmhXl0tdHS7OkpOkDP8HKK6jCbd25c9/BfOKyUSkdvJMdndf++KUuX4N76xWS4NztBrDpcOf4e3JzzFnfCioCnFqqU/D6ho43FQNa6x+gW+9lfvamgGc93Pu3/sbOWZAV2tKhttZvWeKkcuSK4iQRdpFCdzWU2LR3iwfewVw1WSZCgvJVQVWjY4iKQKG9dTx7f4yH/0CfDTMW78ADJExFM75MwD2skKMVR3L4SkdyayhQwCqo4KkIX90jbYRBNxtz/nzX6Fg0T/q1C+ofYxntVrqtVZoDLlH95N7YAe27N2I9lK6JEfy9BXd6Hz9mV8j1ViCElQJgnAzMAmQgOmapr0RjHVbCyt+28f2AyZmbq/0nOVDtRDU5XESSIrbH/V1xvi7U3x8yir6jn4NqO1q7k3N59xZqrgwkWKTmW6JOob20GEW4kipiPS4q1vMxcz8890MTLdQWR7GHT0EtuZAvkXhyYv1fPXFCZ9s1ZrFMxiUUcmOAhjcTSAlQsMgimiCxP0XhvPRLo0Kh+QZ36B47bGmoZ1bQNxT2c8b94HP8+bCPDRBRPj6TQRJ57mzQ6rxV1uj1rXenIoho6IoknDudSz/7QCDLj07U+OtlTNZw0xmK5Pm/0x8pN6vrxNQK3iqazhxIBrWWP0K9Nqaz3kHik5ZRVJsDD9H5NcTEi/dnMGO+RYWvP0cmqYxfMIkXrg+iv3H8rmth8jmHDhSIjOsh8Tqo06+XLaOJ++8jsTYSExmK9O/XY0kCoy5QEfXeDDqBLrES9x+joE5B331C6o1rC79Atjyxj1+Nchbw9TKMjTZDpJvoCKIIqiyz3o1CZaGWY/txmktprI4j1+nTSAtEjISI7j93Pb0uflixAZqx852mh1UCYIgAVOBAcAJYJMgCN9qmra3uWu3FgZe2htnWQFDBlzld15UXR4ngaS4a9JYc76lvx3CmHkpYRGRfl3NvUfO1HzOfTz4+YaD6JQKovSgaQopMaUMPSfSJ/i6qX058ToHJXnZdEsQua2njh+yZJ642MB95+p9slU7f1nCLouTcrsCaEz6nwNRBEkUSIrSEyUoRIVF+R2xYPdyPneWl3Ly6zdpd/uf6/z8miCSNuJfnmHN2ZNHYUjqiKaqrhe0strIc68dxkfvvUD/8zsTEdaytV4hAuNM17CZS9fTNV6sU7+GT5hUK3gqt9UeThyIhjXHXLS+a/095531Kiu3oTrtROo10mOsQf5mrwAAIABJREFU/On6ZM9+wRU0CnIlNruDrgkit/fS8f1BJ/eeq+fuPnqWH7Tx5hffk9m5E58u24SqKDhl+Hirk4+3OKvrPkWB2EghIP2CxmtY3uwJIIi+GgZIUQko1pKAfo6B4LDbyNqzFTSoNBeS/9MMlIoyDOGRxCS3Iy4hBUd8DLOe6R+09zxbCEam6hLgkKZpWQCCIHwJDAPOCEFqSCT8pdUHdFKYsbmMb/Y7fdYKZMhpY1LuiqLy+erDDHje5fNU09W85siZms+5jwc/mjCKsryjFJpNxBkEdufa2Jl3HAfT+HHFUpymY4RpNpbu1mGyOpGqAhVV0/h0m5PYMAFL+QmspSVomkZshJ45Y84lMVLPgYOHGTW/FF10IpIk4Z7prlmqBaIu4Sndtox2FQco3boMOvyh3p/b6aQxNQ2iKHLRXc/wzlfTGXvL+Q0aIAZjTluIBjljNawp+jWkm8jMn7agE7UGhxrXpDllD/Vd6+8576zXoGcnsfvgUYySwI5cB+mvHgIBZO0HAAyiwsSfVWRFRRRAUTWcCkz6zUGYzlUovnvZRu6aMBLZuY4fnvevXwBOAtMvqKFhpxDFXklF/hEAHJVW1sz4FzpJxGkpI
iXGgCI7GXx+Okajjh/TYijLWocOUAHzETADyTF1GzO7aUifzkb9CkZQ1R7I9vr9CeDSIKzbKmhIJPzXCIQ1qqPPTWM6YwBm/biLTtfcDfjWR+UdP8K1iU5mzJvMqmXfABBhOc7Yu6KBJG5IKfU8J1Ydk8nlMvf3MTLmQiMzD0bxrXQjSVe5nMBNa+cyyPwlKYnx2CzF3HdhFNFCJe1jRN7dYMehaORaNP754EAQRe7rXkF5rolyTSNSp2dILwOzDyroouM95/veA0S9hUdVZFRLAQemPUZURQ5/HRTOM8tnkev8ivA6HIglScJRXG0GqnnZrwu0fANNY+sV0jpm8tMykY8Xr27QALE5c9rORkFrImeshjVNv6BjWsvrV13X7jtWyB8SZUbPXcmkJTsAECz5zLkrEohldL9ILp28iklLdngCHau5mBs66RjSXceKnCj+J16IFJeG024DIHfD16SGi5yTrHJuqkSYQUfveIWdBQoJ4QJPXmzk5R9tfPH684w6V2y2fh36yHUzG2E57tEw1LqDFLeGaVW1oN4a5o2z3IyjtND1a3sla754C1FzHT5WFOVS+JPLOwtVIbFjNwRBJExz8N8x56HX+bea2T79Ob+PB0JD+nQ26lcwgip/Byy1/kYIgvAY8BjARxPu4bFhVwbhrVuWQEQiGB19bhrTGQPw0958rhpXPSzZ7WqeU6CQ1rEzd11czLfSNQDcqvxAjMEKQIxB466L0/hWuoakq0biLC/FPPd5buvl+kLfdX4M3yxchbPfLa5p7L+v4o7rDLy1oYyTZoX/bi5EEKDKpgtFhUijhGQwoo9LZfGxkyw4LKNWlmGIDgeiCEtqR8f73/ZkdNzZHVWRfYRHMibQI0mHnNyOQckCV/ZLYky5iY8323EqFex74zb08emeYFAURNIyMj0/k3xVIW/m864CT0FAUxX01nxURaiVTTrVs668ybjiNj7796N8N7ruY5Lmzmlr7uDcs4gGNcxHv/4+jsdubP01ca1dv7yZOGcVF6cqFJYrHCh0IsSm0j4hn4OCq3yhW3wpX2yvZM7vhUSH6UkKU8gyJBDVqQ+Kw45a+gMXZKaRZy2kT1oYK9dvJnn4P4lN746zvBTH1sV0STRgKnfwwxGN/LJyj4YpKszdA1a7CprA4mMxzdavTjcMA6BH7mIfDTv00VicJbk+GiZoGrFGCYwRmCWR3C/+iGisGtuiuY4ARWcFiiZw+INHMVZ1/SWF65l8dyaxUa5i/NU//cA5N9xV62db+Gt4nQFVc2hIn85W/QpGUHUCyPD6fQfgZM0XaZr2MfAxAOsnnwL3jebTHJFoCoG0ELvZsj+bsE79PL/3tk8wF1nQRbn+CCyWeWiayizByax1KqK0DVVRECQLUrut5HW6lIrtSxnRRSZGr6LIKlE6mSGZMkurUtbDugvEh4vMfNj1D8m/v97Lgt8FFKcdTdNQUSjXxaIao5GufgxJkhAUxVVY/pT/4nB3duenuR94hOfBsgK+2FLKXwem8dTcjdxzo6s9d2S/WL47WErnS24id+0cClSZce/NJSo23nPH6E7Bo9OTNHg8mioj6XTYd31Pu5M/c8CkMO4f7zPthXsY9+58UjO6+OwnKirar+C1VOB1fPdG4gwqmYmGOo9JmttB2tzBuWcRDWqYj379vlSjOOuUba6pnE790jQNh+wKCJKO7MJCJGpV9qW03IZViEJXFRyoisqqdYdxVCjM2m2m3FKOGFZAmNGIvTwLTVM5JDg5lK8iSpaqazT0GSKp1z/E0aVTuPeCSAb1jsBRrMOQEEFOmZP5W1cQObg7pduWMayXjlfuPMez139/vZcvtlWgEyVUFPKVKNCBGJ2EbtD4ZunXO7d34PGvFyKJ8LeRrm7lG7tHMH+XnfCEOIqs2VhLc7l96ECMYUa+WPQ92vHNnNy0nB5XD2P38i9IvedfaKqCQSdh27WC9JM/sb/AQbzOycr3n6J7RkqtPcWGS6x7fRTR8Uk+BsgtZSzckD6drfol1JVmDHgBQdABB4AbgBxgE3Cvpml76ryojQRVbmfdmrRLaXxqvC4uGTsVk8Ve6/GkaCMbP3iqzuvu+88yrhw7Eb2x9hfGexbUzilja00+9+6kyzl6ENvPH6Kz5qGUFwMgRSagqTJiQkfKcw5ixA6qiuj1RVWkMIS49mQ+MtnnPdzvA/jt1jv00VgyO7Zn5IT/oGkas14cwZx7YkmM1HPsRB4Pzctl8LlJiHYzD1+ZRnSCqwV58i8uwfrk1ggeX5BHZv/h7Fr7Pbq49jhkGdlioqPezAGzhFMXiSiIRMfFEWE5zvuDwnluhZ2EzPMQTmxG6nI5j77+ea2f209zP+DYjzPodMOYeodNA7VG+zQGtzHqs5eIhKuVXN81jOFVnUrehbnuAuKkKB0mq1zrNfUxcfZKyNlSp1ntKSe1L3S9rpW1DbhotIa1kaCqpfXLUm7DWmmn/3MfYSqtcHnJqRqCIKJpKhHRsdz1wGOASNdLbiQswvX3VhAEdPq6mzQa0i9wddJd9Jd5ABz8+CkMdjMAssXkGT7sMMbhEMMQCg8Sptl99EsQBGyqRM+/fBt0/VrwRF/GfZlFhKTQKTGcaINAuE5jV66dbXkyHw6J4KEFJipUPes+mcB9/15A9skC0vQW8pzRmG0qqiAhigIpsREIlnzeHxTGuKVW4sMgs1t3Fr9V+9+GibNXsmTVrwwZ0D+ghqjmHK01pE9nnH6JerjsiYD0q9mZKk3TZEEQxgErcLUjf1pvQNWGCGZqvC5MFjt9Hq3dJutvlImbghILYlIXvwFVU4i9+98AnPzsORzWElIHv+rJ9FR+/SZxoyfiLM7BYDSiKAqK7KRw/itQkIvu0D5k2YnTYa/lOeUPtcKMLr+Yjcu+BPAcWSqKQoRi4d5zDczeVYQIfLY9m8i4SkRRxFZuJUpy0M4AD/UzMmnVfDpEa+iSz2HEn99l1osjmDoknaeWlHusIJbNeIfOh2Zx+fndGGUu5L9rNrN0VDR3zd9IfvYRUjO6eIKjoY+/VGfnpD/qs65oCPdR7YDecfx7SQnDz4/yO3C2qVmG5tS2nI2cqRoWLP1SVZVSq42th/PZddTE3lwLhvBoSp0iUckdKaiAbk99CoAuLMpzXdb0p7ngphFB2UN9RNz0HMaqgelH3r+PtMGvAmBUZSxL3iH98Vk4i3Po2K23ZyxM7uwX0QQ4XqVfDrsNQRAabBZuSL+uypB45PP95JjtWO0axyySx4anrNxJhCjTPtzBuakS2aUyz0+cx7LXx7hc4YdkMHZJhU/g8c//LqHsQDHXn5PE7YdtGCSBxb8f5mB2Ad0zUjzB0etP3tGozE5zj9Ya0qezWb+C4lOladoy4NS2N5zFzPppDz37P9Qyi6sypqXveFyC1UoLzuIToIHDbq+a0ycgRcQBAoaULgiihKAzoMkOZIcdpWommCw7OX5oX/Xaso1wRwl/uSyGV1cvRgtPYFux68jSNYS5gpgwkQ5xRmY+3Jspa1wjdS4edA+zXhzBxzfJxOotDOtlYPrmCt6+KZJH
l2zkx7kf1upsvHjQPez7cR5/ukmjvLSYWzpWsMio8sqPFQzrKfHttFd59PXPPcHRdx/8vc7OyZrUZ10RCNVHtScxlzn49WguRr3kc8zbmKPgmpzqY58zgZCGVeNwyuzIymf9vpPsya2kUtYwJnchKrUzGVeO4IKoaJ/JDYsWLfQJpk4rGpiWum5SNUX20a+cowdRZBlB1KEBUkS8S78kHYJOjyCIqM6m6Vel1YLVaiXSIHBeuoE1L3XzybC4MzeTB4DsqKDUpvGv64yM+z6Ld+as9HtMZjJbWbRqAx/eBMfzirizt457F1UyrJeeCZMXsPitpzzB0Z+nLAj4qC0YR2sN6dPZrF8hR/U2hiwr/JZt48Zhrpogf07g5sI8tr05ktjEJJzWYk86233OLkkSTmsxWdOfpsRUgD7Kde4vhUVgIJl2D77nSbXvnDIWQdSji0tF0zQEnR5n0QmXMKkymqYiGMLJ/fw5T+eKWmlxFYk77RQtnwS4BM7gtPBgnzA6xghVo28u5sThvYyc8B9mv/Y01oLjVABHbHD1VNdonai8Kp+ZTJUIxUJCrMTs7VZGnacnNULjnj4SX6ycwzvPVtdf3TtvMZbSEgZlVHJeuwiOlZQQhZMeiRIF5Qr9DBKmXRvJ2rud/asX8/6wdEbP2MjNV/fyWaOuYKk+64pA8Ha6LynMo+LH93h11NU+r2lOlqE5ghbi7EPTNI7llzBv9e8cLwOTXSKxU286/uFR/pDarsUH4dbUMHNhHlveuAdREFE1tZZ+gatBxV007q1hggAZY1yaYzcdp2jJRI9+CbiKwgWdvmr4sebRr7zPn3dpVlUg1hj9Gj7+DSY9dRtpiQLhBok8B/xhquu7520QPSgTdIqNrw84GNZTxx/aS9zaQ2Tu8vVsftaVafPOynyw6BduyHByXno4h0wONpxQyIgVOWJWOVB8mN/2HGXJr5t4f1git804zJuPZNRaw1+w1JxaJzcN6dPZrF+hoKqNsX7fCVLPudzze38uunumj8dmOuEaNKwqnjsvRXaSc/QgkiQRn5TCa58t4aUHhxAx8Hkcdlddl2nJOzjsNhTZyfFDe1Hc416qENyOwUL171Pu/jsGY1itWgTvWoWKE3sJXzuZe69qBxUnGdkvlpkffklapObjmeWPjyaMYsORYuZsqCBcJ1BudzJ5UBgvrLAx/goj3+2zoSmuolj3rMJPVi3id83Jr8cslFY4qXSqhOkEPhgczhvrHAzrqWf+Wy8w6jwd7QwVjOyr44c9xXS7tr1nDX/Bkrd1BTQcgDVEfHIaO0yORl9XH6fi2DpE20bTNFZsPsT323PIrRCJTOlE96vGckF6h1O+l5oatmf6eBRbBU5rMZqq1tIvN+5hv+OGXkbikBdQZNmjXwCK04ksO9GqGjld/xeqNQyXfqWOeA0UGb3BSEVeFubv32+Ufn35+vMsf+sh+nZJq/Mz/rL1AL8fsfDhWhuapvHF7eEcLlbZla8SJqgoqmt33lmZRT9vpaJM5sfDZZTaFDQEpt4Sxgsr7NzWS88Tb8zk/nP1JBkc3NtXx5K9Vl5IMdab2WkLR2ttXb9CQVUbY+2eE3S87u56X6PYKkgb8S8ACr+fStEylzAo5SWI4TEoFWYMuurutuzZL6I47AhVHW8nP37Us5a3q29NBL3Rk6ESBAG53GWIJ+AqMHVYismZ/SLt73sd+eBaVxdhpA57lQNoNFb+fl0yY7/6gD5X3VyrI8/N42/N4qMJo7AWHKewyMQ9vfWsOa5gl+GHLJkbMnVc994+4qrcjGVFIUxwsORpl4Hfnt17uGeBlSE99KRFC5ybIjJzm41IwwlG9jsfW8kJbu6qY/TifGbulD13xN6Dp914W1cA9QZggRKRcS57j+RyTpfQTMAQLYcsKyzfuJ/vd+ZRaNfT/sLryRz+MH0iT5+1iD8UW4UnW16w5L1a+gWgOGye12uqTO6cF91j8mrrl+o9CKsat36haWhVr1HKzQhC4Pr1wPlxvLlmF3+ZUsTnr4ypMzD59p1x3Dp+Clv2HeGuXlXd1DscKBqkRwn0e+846YnVfw6JJ/aRECHxw4OdOZmXz8ebbYgCdIgRGdhNx4xtDqIMxYzul0lJSTEDu+m4f3EJM3c6PTVc/jI7bf1orS0QCqpOM0nRRr9F6f7aYDVNY2++nf6JyT6Pu4sv3ciy03N3lzr8H8ilBWiaSsG8l131A4KErGqeYcEvPTiE4sJ8Ojw1E9VpR9S73ltTVU5Mvpf8eS8hhse40uJonkBLioyrqkcwkjLkOYqWTCRxyAueotHKguOe+gYl93fmH7cxf/dJZKsFneEQd3WXSDdUMKyr4qlxqgt3JuuVO/oxb48Vg6QyeVAYTy+34VBEBGMU4z9bDVS3ObsDn7QuPUiN2c3oK9Jp1yGNJ9OdrMw9xtC+0a7XRHYhGRhd5Krhqi848rau8MZfABYo3a8YxLwfJ/H3UFAVogXYtO8Ys1cfIk+OJL3fjfS5/7KgNblA4yYK+KO0yOSTgXI3viiyTOLN4xAkvY9+ASBJHv2KS07DapOJu+V5dLGpvvo1dTT58/6KFBnn0S8ATXagOV03koIhnMQbHyciLZPj05+i4yNTgcD0a/XBMu7tY+SrPccaDEy+fWccnYf9hYX77MzbU4FR0nh/UBjPLLchSjo2f/Gq57Xu7rekKB2kpbK3KJvJg4wkxkbxyuAIVufme0ahJUWl0B0YZ2q4S66tH621BUJB1SmgqbYJNbnw0fc5nG9h5aZqN98SUwE6u53/b++8w6Ossj/+uVPSE0hIQu8CoiAWYP2JiIpYo+haABFFZVFcVBZ3dRVXV11X3V0rzYKKKFXUdUVXWZoNEaQI0osQWkgvkzKZcn9/TGYyk8xkJplJ3pnM/TxPnoe885YTMV/Oe+6535PQodYAU+gNrp4nACntxKR3Q5+URsc7XsGSfxSdDkyrXnNdI63VjqqTlNjKCmp6DgCdviaJEki7lc73voO1JBeQYLOCEJxa+hdyljxOXLrvpYOEy6e5tkMfeGMKnZLs/G5UEqLoKA9ckMC/59fuyGuIpz/e4kqaLhiczp2V+WxPvYLjB3ZhKikiqU0quzet45tDx1m8rQKdToeprIyb+uhJoBJwVJeu6l7Noo0FLNqQQ2JKqmtIqL/kqKFlyqaSmtGBnfmVIb+vInopMVUy76vtbM0uJbnfMPrcOIYz27Zr0r289W0CLnfxQCcK+LqPzWZ1vYhBjX4Zai0X6uoXQHXur5jWzfG4T8F/X0XarJ76VXtXpN1G5ynvgJRYS3PBanHpV+6yJ4hJSUdn9O567k2/br4wga+25XB19yq+PiT5ZPWPfpfRDn/qmNPtTJpGD2nDvvIiFu+SFJSUu67938bd7DpYwAc/V1FcXsXo0xz9YmUVVfRpl8Ko7jbe2VjCvA1FtGuTiK7GidlfchTpS2uRgEqqWoCm2CZA/WQs+2QehsQ0IIYzJ73IznkP1ZS8JZW52a7zpM17udsXxXk5oNNjMxW6jmXe/JSjLI5Eb4xD2u3kLH4Ua8kpsNsQOgP
CGEtcehd0NT1avSa9SM7RQ67eKmm3YC3L58is29EJnWvYqNFWybV9Yom3lhEbB5lJBsYNMPitVoH3nqal85aSbLS7luD6D7mYhNJf6T5yPJeOm8IbD9/GlznZfPkpOHweAVKIS9bT3ljuOk9LZEIaNpsdvV5NgFc0ncLScmZ9tpWdhToGXnMXI0b3Dvqe3vo2Aa/VKXfqJlHOhnJ9XIJLv2xVDn+rYPQLwFx0EnQGVzKVefNTzrshpUSnj3HoV3FOA/o1s1H6tTXbxAVdJD1S9fz2dCPfHg+s6btuX9PVveGN7wuYs3wtf7nb8cI8amh/zCW5ZI1yDI7+9lQ+37r2puYCcbRJ0ZFmMJM1apiqMoURKqkKIb4qUifzijizCferm4zlL55DxpX3c/KDPwGO3gNjWmfyV7xUWxbH0XsgdDrnhAO/SKGj/S1PEdPeUe06+d40YjJ7Up2fjbBZXRUoodNhbNsBS+Hxmqntjp05OrfGT/eRMQDWzI6uhlInbzx8G4u3HWbeunzSE3TodY5REbmVm13VJl/U7WlqG6fjqq6V2GNT+PabTzhj2BX17A68VZecBpwvZjXNFiHUpJ4xnNVbtnL5kH6axaCIXH7cnc3cr3Yjkzty+qh7uaxT90bfw1clqTgvp0kx1U3Gjh/eT2x6N07Md8yac/ZOHZ0/3bXMBg79suQfxbFUF+DOQ52Bzve+ja6mTyomsydSSix5hxFAfGa3kOrX0u3Z5BcWEY+ZjAQdOh1kJBpY8XVgHlHOviaL1Y7BVsWk82J478v13HfTJUgpPSwPvBlmOi0a5mZFnuN4a0clVSHEV0Xq+HO/83J24xHGWE4sfQJLWaGrkVKf5EgEdDEJdB7/HACHXxtP4Yp/UV2Sh6yZKKxPbIswxjoSJSGwWa0U5efy3NRxLlFxuetLXwM9BbbSvHpH7dLeKOG95x8fuJbwpg5Pdx2f9W2+34bvukt7Dn8rC30yK7i2b2LAflPB2iKEms69+/Pd6pUqqVI0ilOFpTyxeCO2Dmdx/r0voTc0XdJ9VaQ2Pz8mmBA9OL7wUQ/9Ovrug0ibFX18iod+FX3xkmP3n83m8JRy0y8ApHTpV1JSMgWnTtYclvX1S9T+wRgTi6XOaFq7tFOUn8vOeQ9x5qT6+l0X50vaOw9ew83dS5h+URvXZy99U+LXI2rmh2tJTYxh0Q4zpeVVYK0mJU4Qi433VtRYyPixPAiFLYKieVBJVYRgNpWA0GGvrqD9mL8Rn9mNytxsjOldsVvMnFr0iOvcmJQMzpo6l0Pz7nfs7jt8kPSsh2qMOwEJOkMMxqQ0TKYy2tSUtQlgZJG02zGmdfYw/zMmpWErL25Us2pTG77dl/acpqDOMRG5pVW8P9u/35QvW4QzLrySz17/W5NGzwRLakYHjlX5P0+hADieV8w/Pt5MkSGTQbfMICU13f9FYUBI9AvHC6ZTv56dv4IpVw92fODjZbDu9+49XMakNDJH/4njCx8NeOh6WXEh+fkFLCq1Narpe8Hn6+nZVkfWqGHcfs0F9Ua53Lh4I3Yp+eRWhzZ5szzwZYuQNfxsHp3zcZNHzyhCg0qqmoHv33qS6qraxmOb3c7amQ8TExfPsN891cCVvjGbijG26UDV8T1eP5c2q6sXwGns6WwkfW7qOI4u/6vLIM+JPi4BqPFIEqK2uVNQa/gnamdhYbeRs+RRDImp6N2WG/VxCSTFGVxlcvdlBJOpzDX02BkPNK3hu66Tubmq0mMpMN5aVs9vKquXnVnTbmbqKx+6EiVftgifzfkrhrzdmlWtTpa3+CMVEUaV2cLcL7bw4ykjQ296nAFt0/xf1EicvU5O7HYb22dNcfVCNQfB6pfQ6RxjZnS6Wv1yS7DM+dlgt3Fk1u1Iu52YlHSP+3To2ouKGu++QPQre/dmPnhiAr85s0fAP2NdJ/Nyc3U9e4MRnavZccpGepJjU0GbOB3npZqY89Fa/nKXIw5ftgiPzPqQktwTqmqlMSqpagaqqyrpeufLru9LTx7BaNBzcsnjHs3pjZkenrNnM/FdzqZ0xyqk3SFA0m5FWqvRCYHBYHTtTjHXiIMTp21Cr0kzPQTTVlVBUU1zegYSaa0RKGMcJ+c/iK28CKQkLdNhatetd1+PJQJ3K4ecJY8zY2IWJQX5WKuriE31NMLTxyWAl34Nf7gPLq67ZPfBus/YJWyualdZUT7SbsMq81i40yGoVeUmLOYKj0TJW5XMarNhr9zCwjtPq9djFczw5MYQl5jscK1vZgdrRWSyeuuvvL76AP2vvouR157VbM9x9jo5qcg5hN5gIGfJ4x6VnEAtE5zo9XqkzRqUfkFt0ufUrxkTs7DbbY4lP2u1S78ArGUFGAxG2rRLp1vvvh73ctev44f3U5Sfy9Rrzw9Iv0oO76RTX//VQffBxXWX7Bas2YJB2D0qXblF5VhstY7speVVVFZW0e74FldS5c0WwWqzU1lRyH/u6uS1xyrYAcqKwFFJVQtgNOgZ2LM9uvQUDn0wPeDr3D2sCouKiUndgqwqIyY2lg5dezlEofQUNqsFc2meq/dBJ3TMmJjl8WblpK5gmvOzKV71OrlLn0C4jYEA0CHp0quP623RZCqjKD/X5Sljs1qJSeuMIcZRiu81aSbHD+/n1L9foNPEV6jKP+bytDq19HGqBD7j8oVzNt+3H73D4U1feSzZfba/hAnPf+wz0SkrLmTBIzfz998YmbFuuStR8lYlc/Z4eeuxCmZ4cmPIPON8PvthK9ddcHqzPUMReZirLTy24Hsq2w/i0qn/xGAwtujz9QYDnXv0qZfs+MObf5U3/QKoLqmvX857NKRhznFahc/dwvE37kan89SwGKOB1/6zwaVfMyZmuTTMXb/AsQzYLmu6S78Al4a56xcAFUW8O+Exv/8NnLP5Zn+4lnUbf/ZYsltxoIwP//mQzyQnv9jEjQ+9wowrY3h2o85lueDNFsFp0eCrxyrYAcqKwFFJVQhxJkEWUymVebVVEIO+aZUHdw+rzOtrlw1zP/0nzncUfVwCbS+7l9iaPgR3/G15dnLmpBc5NO/+BgXTWaHaPmsKsendqMp3/Hx2KbFUm7HWjJCwWS2ua6TdjjHdMY9Kn5RK5rUP0blHn4Djcl/uu/X9JUw4r02jnMw3/Xcpl3cup2eyhVGdfTejNzR6RkoZ1PDkxpCS2RVT9sZmubciMvnsh71mlluuAAAgAElEQVS8820259/6R9I6NO8IGWcS5D4vFDxn7jUG92TIfUmtrn6dOelFcvR6zvvz0nr3CFQrBj+6rEENc6+wb581BYmj1cGpX+A0TbZ4XOfUMHf9AvjlhRv9xuS+3Hf9++u567ykRjmZL/h8PSM6V9M92caITtU+z/U3eiYUA5QVgaOSqhDiTIJ63fYSZ/ZsH9J7V1rsnFmnZA2OZTdfxpvFeTmuN6uSgnw2Pz8Gu93GsfceIu2K+yj4cjbSauYUYKso5r6soeiEjs7de/qsJOnjEjgxfxrVZYWARJ+Y6ujHMsbhnGhjryx17erRxafQYdzfm/Qzuy/3JYlKFmy0sW
yn55w8X43tZcWF7F63nKkXWuiaamR0TwtT3apV3p7jLWEDWnSXoPddl4poo6y8in8s/5Hijv/HldOmucxpmxPn7/yMiVmu5CFUOJMab/p1aN79CC/+L44ZprmutgJnJctuqcacn43NaqHgy9mcsjqSosZoWM7ix3DXL2mzIWqMP536BYDOQMcJ/6p3D4PO/4uy+3JfLNW8tbGUpXX0y1dTe36xiU/XbOSFC210TzVwdU8rj6zd6DUh8jd6Ru0UbFlUUhUxOH6J6/qomNMzAejlpYFUCl29LdLHD++nYMVL6A1GkDY63TULAEv+UeIzu2HOz/ZwWq+Ls1F1+6wp2KSkw/h/YIyJ9djJo09MpeMdr2C3VHNq0cON/knLigt5/+8PQskJnhznaE798J7+3Lq0hAnPLw2oUuSsUvVI1RFr0NEjVeezWuVrJ2LcsXXoKotCNjzZH2mZHdmwuoDxI0N+a0UEUVxWwX1vfUf/6x/krG69/F8QQXjTL+dg97o4Z5jWTfA2Pz+Gzj36ONoQmqhhgegXwIl36i+1VeUdbrB6l19sYuIz8ykrKeajsY5Zhavu6cYty8q8ek55w1ml6pGqJ86go0eq3me1qqHRM7dfc0HYD1BubaikqhkIZJ5fqEbXhCuOpnfpcGW3Vju8aBoQImcz+LX3zODdJycjSk9y46Bk2iU6Kn6NHVy8e9M6vjlQzFe7as1F8yvsZBZ/Xe96XzsR1yyeS5fDy/nTh4f41829QjI8uSHiEpLQxcSH/L6KyOHLnw7y9ncnGHbn0yQmt/F/QTMQ6Cw/f+NrIhnnph3nn931q+LEfhLi64+zyS82cecz73HkZAFV5aWMHZTQ5MHF/9u4m1/2l7Fil8NY1G6HvAo7A0p217u+odEzT89bQXFRMUKkNCkOReNRSVUzEEhS1NTRNcGi1+uxmArJWfI4UlLjXIyXWVn+0cUkcGzunTVzBm1u9xCcWvwYaVf8Hlt5MXmLHiE2qU29t1R3nM3gH7/2OG0rj1GhEyz8qZh/H8j2WPoIdHBx/yEXM7pLUT1z0X0dRwT88+3f+j3fHiwkw1jByJkHSEpOblQMCkVj+GzDfj7LjuXyqc9rGkegCVFTx9cEi07oODTvforyc4PSMF1MAicXTMdeUYyUeOjXyQXT0RnjsFXU1y+7tdrr/RZ8vp68E4cxlVqIj9Hx7k9lrDgoXHP5IPDBxaOG9mdU54p6xqJ07t+on/HjtZvRSStDZh4jLbn2hU0NUG4+gkqqhBA3A38F+gNDpZQ/hSIoRePw9WbpPn7BidOPBcBUZSU+s1u9cwJ5hsVUSIexfyP303/Q9e5ZrvK5tFZjjInlxPxpxMTGIgQkxRmAatf1dd94nY3ir43uyIS3N/Pa1fH86wcr157XnmPdb+LScVM8bA0Coanmou7c+thrfPDoWGZnJfL7FeUBLz36oqWsGRSBEy4a9vH3e/kqpw1Dfnu3Fo/XHG8aZjEVeq1ut2mX7loyDFTDfOmXXq8n99N/knr1dA/9AjgxfxoGvbGeflUWnuT0bpke93f2QD1+oYFnv7Fi0OsYe14qiT2HMH385R6WBoHQ0JJeoMlQfrGJtEQDc8d0Y8qKioCXHhu6n7Jl8E+wlapfgN8Cb4QgFkUDxMXG+CzJ+3qz9NanECj+SvvPTR2HadVr2CtLPTxn3Ns33Q31GsLZKN7BYGLsGTqeWF3Bdf3jqbZVs/cbRw9TY20NmmIu6iuuUDWpt5Q1g6JRaK5hy77ZxdrCDAaPnqhVCM2OvyVFbxo2Y2JWg9VtfzSkYUlJya6+K4up0Kt+QW0C586W959i7p2DPY4t+Hw9wztWMTBTcGE3PbvzJNgsrlmAjbU0aGhJL1BC3aCubBkCI6ikSkq5G4gKs0Kte6Cuv/Yqhk54vFHX+BOy4rz9HJl1u8dnzp0z/kr7dXcKuXvO2PB0RW4IZ5Xq8ZuSqCo4zLiBRj7cZaHSYuPHI0WM7Bfj8qhqCVuDunGFqkm9rhu81gOcFQ601rAP1uxkQ0Vnzs0a3+zP0rIHqin396dfSUnJTdYw90RpxsQsYmJjPfQLHBqWlN7b41pLtRl7RbHHMWeV6tkLbOiF4MbTDdxzoIqSvSYuPyPV5VHVkpYG/mwWmno/ZcvgH9VTFSBa9UAFQzBC2ZgqV92xFk4CEWtnNSjeWobeaKcSuPvcGN7dWsUlvWJZvKmQmKTPmDAoptlsDbwtyzVks9CUZwdb9VKl99bHe6t2sNnSk0FXhm5gcUNo1QPVVPxph7/PA9Ww4rwcTJ/+s95xIe31nmGprqZHRpLHMedOva4pAlO15KwOekb2MrDphI13fyolJXEztw+KaTZLA2/a4M9mobEEW/WKJv3ym1QJIVYBHbx8NENK+WmgDxJCTAYmA7zx8Bgmjx4WcJCtkUB2CDYHDb2tAq7P3J3T9Xq91zL8c1PHUZSfi4hLpuPtL7mOC8BWeqrBbc1OnL1P877Ox2a1kGiEpBiBXidY8asRi10SU17AmEGObdXNYWvgbVkuFD1ZTkJR9VKl96YTCg3z0K+npjL5sr5BxbT3aB6rf7Ux/M6WSaiag0B3CYaaUGmY8z42Kel46wuu4wIwxMTWq4ABnNi3lSEdPHdlrtuyj217y3nrRxsJRklSjCO5KjEL4uKgotzErWc7fASbw9LAmzaEoifLSSiqXtGkX36TKinlZaF4kJTyTeBNANbPjHqHQ61sE/y9rbq7DjsnuTvdlXOOHqKoxowPHKIl7TbsFcVYi0+57iXtVmJiA0sOnb1PZcWFfPDoWBaNcbimF5RbuH7eEUqKSxjdT0+ctQyIC7mtga9luVD0ZDkJtuqlSu/BEQoN89CvPZ9LCg81+V7VFiszFm/msgde9n9yGKOVbUJTNcxpPOrUsKL8XMeQZkk9/bIZDF43+hQf289Fl3gaLf/nxankF5u45eFXWXZLMulJBvJNVi6bd4q84gqy+urBUgkYQ25p4EsbQtGT5STYqle06Zda/lP4xFptRuKY8eeclSXikkm44g906NqL44f3E5vejcMzb/fYgWPOz6ZD115sLcj3WoL3tixYN/FoG6fjyi4VLCywsfKgnWW7sklMrXTZKzTkpP7+3x9EIJgw41W/laBQN6N7I9CqV3lZCQZb/b495Yjcunh0/tcMHvcweoOS3+bEZrO6RtDYrFZsVivGtM7o4lNcc0p96VfnHn3Ipf4SYkVhDj99mc7mNz0rdHUTjzZxOkZ2tbCgwM5XByUf7j5FRmqly16hISf1ic/MRyCY/8REv8lHS2hDsFWvaNOvYC0VbgBmAhnA50KIbVLKK0ISmcKTZqrtub+9Aa5yuV2n58SC6Qi9AZupiJjkNKrLCjGmdfYYM9EQdmkPqIejrLiQNcvn8VNCHEu3VwFQVW7Cbq6mU4qOLya04dUteo72Ge834dn036WU/7qVNnE6vwmSe5N8/vEjjBnUntuWh76JPNCqV0lBLuf1SvM4FuqGU4UnLa1hX287SEm7QZzesWtzPSLq8KVh6GPIWeiY6GAzFTkOJaUiDIFV0b3pV+6ad
yg6tL7euV/9uJtt+/JZsK0Sg15HaXkV5qpqOqfo+HJCMnO2QNt+w/wmEws+X8/BQ0doGyf8Jh/u2mCx2rmwYxV/8jHKJhiCqXpFo34Fu/vvE+CTEMUS1mjVA+XEUnrK/0lNwGazEZvezVEGB1e5POPqB13fn5g/jbOmzmX7rCl0Gv+cx7DVULDpv0vpnaqn+8jbXJ5U8/90MynVZj68JYm28aLB2X1OyooL2blmOe3iJY8PN/C4n/Pdm+TNlkrirGVc20eEleVBqBtOFZ60tIa9sWo/F03VZtlPqx6o5saXhrW7cioJHRx9VCfmTwOg08RXmqxftuoqZHVlPdsFgCt+0x9LaS5Zoy7k9msu4PrpL6M3W/nolkTaxAuu7mnxObvPSX6xiY9X/0i7eMmM4Uae93O+uzbkFJTSxmBlRCcZVtoQjfql6s8BovXomMw2YTy+RNpcogWOrcjm9EyvQ1Lr4q2nadN/l9IlppSLexpJT3Qs9zU0u8+J+3Vnd9T7PX//1u/ZnFPBvHX5pMULCiuPktg2nRSNHNOLj+4lvc7fcygbThXasmrzQVIHjNBs2S/SR8c0GwHql7WihOSMTpiObfU4XrdnqLyqmgxjFcN7uOuX79l9ThZ8vt513TkdDX7Pd2rDBz+fIq+olLR4HYWV3kfZaEU06pdKqiKExZ9/R8WXl6M3xngcb6zHTN23VWezpj4uAah9o7OYCgE8PquLXq/HnJ+N0OlrXIdrnpHem0dnLQ5oS7O3nqb9W78n+0g527JtvPJDpetcodPT0eS7l2rnmuWkVFcwYVBg1a17/vEBaxbPpe/JT5g6PL1mjM0NzZZQ+XNULzi0g8vvPsvjWCgbThXaIaXkzdV7ufSB+7QORRNC6ZEVqIaZS/OITckAQB+XUM/2pbH6ZS48Tnxc/apR3Z6hBWs2c6qwio3Zdl78ocrteTrOLvfdS/Xx6h/Rm83cPiiw6pZTG15auBKOb2b6RW2aNMomUJpiixCN+qWSqghB6vSkXziW1IGXeBxvrMeMu4A5LRHaZU33OEev11PxlWOJwr2nQB+XwOFZdyDtVnJ0teMjhLQ3SRx9WQ00ZRxMU6pboTb4DCTGhhzVK0uLQv5MRXjw68lCErueGbXN6aH0yHLqjHui5q5hTvuEzc+P4aypc13Hd857KCj9qji6ix7DrqDoh2WuY956hlYcsLN6ztON6hlyr1I1prrVkj1L0WSLEAzR+RsegQghsJTm+T2vMW+EJlMZxqQ0Vw+CE2fPQVJSMlueuxnptrVY2m0YElOJSUzhzEm1Zqi+ejUa6uEIpcFmU6pboTb4bIhAHNU7t1G/jq2Vhet2c/rF4WmyGU40Vr96TZrpYZ0AtfqlE7qQ6ldFwUl271tLRkqc61ioeobWbdnHpuzGVbdC+Xx/RJstQjAoFY8QjEYjsqLE73n+3gidolWcl4NNSoTQc3imc8inAGlD6PR07VFbAne/n3MbsnsPgi/8vfmF0mCzKb5SoXy+P/xZN+SfPEpGQv0W2GhyIm7NnCzXcU5mJ63DCHsaq1/Fr0zCXllWo2G1+mVOz/Q6qiYY/drx3gxeu/sCj2Oh6hlq6jJZS/UsBWOLEG0appKqCELYLdgtZnRG/zsO646OsZgKmTExi+K8HM599EO2z5pCRs1kdsA1nf3E/GkkxRlapKk1lAab4fz8QJYZi/JOcUnPtHrXqpJ75GOqMFMsvfclKnzjrmFO/SrKzyUuvQuG5PQG9cs52y+YofLuWMxmLNVV9Y5r3TPUEs8Pdokx2jSsvmWsImyJS25LdVlhQOfaqiroNPEV11eHsX+j16SZHqXwSKKsuJC3ZtyNqSTy+o4aWmZ0krN1Fef391yGdS+5r/h6EwUl5S0atyI0WKw2ElLU8OzG4q5hTv3qMPZvXueMNjenjh1meL+MoO6RX2zixj+/HnG/xw0tMfojGjVMVao0ZuiU2eSX1XfRTk+O9bBxSE+OJWf7SorW/4f4trW/3L48ZqpNRVTm1vqxSLvDFV3a/dschCO+mrz97agLBwJZZrSV5ZKaMtDj82hzIm6tfPT9XjoNvFbrMJqFQHugGuuRtXPeQ1SXFbo0zKlfNqs1RJE3jorSQuKMev8nNoCvik24L48Fs8QYjRqmkiqNyS8zc+bvXqx3vK7RqDPBuuet9Qy+81n/N5bSVRqH2vK4dLNmFzodlvyjjs/tVuwGAxZTIUnpvX3e1m61UpFzCHNpHpufrx0Ga9B5s8QLDQ01ea9eMpfqA9+yevFcRt/7WLPFEAz+lhnzTxxhQEfP5aFodCJurZRVWWmT2VnrMJqFQHf1NbadwFZVgT4ptd7yXkVO7czFltSvUz+vZfSdAxs8pyF8NXrnF5sY+fsXMVaXMuejtfzlrtAsV4aSpi4xRquGqaQqwkjRmSkvLSYxpa3Xz51vhFLakNZq13FvkhGXXjsY1Dnvypye2eAbptMTJiGzu9/dM3VpalXJV5N3WXEhe1Yt4bXLYnhg1WJGjpsSttWqhji292fuGOjZxByNTsQKhVNvLKZCRFyyS8O01q9qUxF6va7JVSVfFZs5y9dRWlzE7KvjeGzlD9x34yWtJuGIVg1TSVWEMebCvrz9/Rece9WtXj93Csp9WUMxxtRvaBfSISDWsnyOzLrddVwndJjbpdcrx9d9w5wxMYuEK/6AzWarnbGFQ6yemzquwTdSfz5N3mioyXv1krlk9bJycc84snpVhXW1qiEKd69n8MhhHsei0YlYoXDqx4yJWZiqrF40TDoSLmlvsn71mjSTnKOH6unXjIlZPv2qOrVx9EM2penaV8Uma/jZfPDF9/y2v4FhXfWM7GoJ22pVU4hWDVNJVYRxzmkdKfnyG8B7UuVESLvXbcPS7hiG3Dajg8fxxph3OmdtuWNMSvPaW+EkEJ8mb/hq8v72o3fYs2oJz99kJN4omHSukeuXR161ymI2k26sQqfz3ECg9a4iRejIbBPHzqMHSElN1zqUiEIfl1BPw6pL89GJ4PQL6muYMSmNXpNmeq1YlRUXECOsTfZq8lWxmfbyYoS1irvOjiPBKJg4yMCEFa2nWhWtGqaSqghDr9dxWpqe/JNHSW9g0n3bjA5eex02Pz8mZM7GjcGfT5MvfDV5V1R/zA29rPRr55iV16+dgaxelRFXrdr06Tz+eMUArcNQNCPXDOnN2s+30POs87UOJaJwX55zsvn5MZz756X1jjenfuVk/8rIM9o3uenaW8XGarOTW5jLuAF6+qbr0eugZ1vR6qpV0YhKqjQmPTm2XlO687gv/vjbwdy76H0uudN38uBrt00gQ45DjT+fpoZ6rXw1eT9543l8vs/Kd9km17GSKknFwY8iJqmy2+3Y8w9xZq9LtQ5F0YwIITTbtdbcNHZXX7D31UK/Cvb8QObgRF7w0XQtpWywz8pbxealhSuZufi/fLbXyteHHTYDNgll1ZB0cotKqiIYlVRpjLttQqCkJMYTX3GSyvIy4hO9i5evUniwZnhJSckcXfI4xiRPo0rHMNNqr9f4GwcTSK9V3cRLSkmFTUeFh+2J
QOqk1+vDkQM/reWGwcplu7WTkhiHLD6qdRjNQnOZBDenfh2ad7+rYd2Jr6HxAFSVsnZTboNeTf76rOo2uK/bso8qm45q7JS75dt6vY5u7eubACsiB5VURSjjh5/GJz99w1kjrmnR53obXePEVwm+IZ+mIVeNCajXqm7i9fTHW0LzA2nI8W3rGHXnOVqHoWhmhBBkJAhMJUUR1e/XGnFvhPemYd6oLs1n3VHvTdftju3GUmXy22dVt8E9WvuNogGVVEUol5zTmzdfWom86GqEaD6PKG80tuTfkE/TmsVz/fZaNbXJPZw5fnA3A9NsJMb7HzmkiHxuOL8XizauYtCom7UORUHjNKxDm1jen+Y9CXpp4Uo4vrnBPis1jDi6UElVhCKEYMxvurD22y8YeFHg1apQ9ECEquQfyEw8aHqTeziz56v3mH+PalyOFv6vfxdeW/k1qKQqKELVwxWohlWYysBHH1eg5pbR6CoezQSVVAkh/glci6OZ5iBwp5SyOBSBKTzxNs5GSklp2Yc8N9x/tSrQcRItib9eKwg88YoksvduZ0jnGOJijVqHEvW0lIYJIeidpqe4IJe27TJDffuIIBgN0kq/Dv/yI7cM7eb1s0DMLaPVVTyaCbZS9T/gUSmlVQjxAvAo8EjwYSnq4muczaZXp7Bj3aecdcn1DV4f6DiJliSQmXiBJF6Rxt6v3mPRAyO0DkPhoMU07P5rBvHI5x9w4a3Tm+P2YU8wGqSVflWZSonN8P7PZCDmltHqKh7NBJVUSSlXun27AbgpuHAUjSU+Pp7iX9ZQMfhSEpJTtA6nUfibiQeBJV5a0NSRO4d2bmbEackYDMENZ1WEhpbUsA7tUrDmfoel2ux12oEi/KjK/plzrhji9bNAms3D1VU83Ic4RzKh7Km6C6jvyqZoVoQQvHDb+Tzy8RwuuuPPWocTcgJJvLSgKSN3rJZqfv3qbZ6ZfmUzR6doIs2uYX/IGsCbXy1myLUTm/MxihBhNBiC2ggUrrv8mjJuRxEYOn8nCCFWCSF+8fI12u2cGYAVWNjAfSYLIX4SQvz05qffhyZ6BQCdMtpwenI5R/dEvs1AJODs83rxhs7s/eYTTCVFAV238dO3eejagapK1cKEQsM89GvZl02OZfDp3ag6vBmrxbunmyK8qCgN7Hc7knDfjbji600UlJT7v0gRMH6TKinlZVLKAV6+PgUQQtwBZAHjpZQ+nRellG9KKQdLKQdPHj3M12mKJvLIb4ew78t3MVdWaB1Kq8dzN6Kjv8sfp7IP0KnqEEP6e296VTQfodAwD/26JbhK4/SsgWz4+M2g7qFoGTqltL4XIM/diLUGporQEOzuvytxNHWOkFKqf82bEX/jbAwGPY/eMIhXPnmdC7w0wvrbiuxrd01ZYR7JaRler9Nq16CWNGU3oqXazM/LX2HxdFVmDze00LCz+3Sm+w9fk3vsEJlderXEI8OCYOwQtNKvlnUAbH7UbsTmJ9ieqllALPC/mnXnDVLKe4OOSlGPQMbZDOrdkUE7jnN4+w/0OOv/PD7zJyC+dtdoNYA5XGnKbsT1S2fytzHnqmW/8EQTDfvjjUO5+43ZjLzvBfSG6LALDOYlTOlXaFC7EZufYHf/nRaqQBShYdr1g5nw8mLadetHcls1QyrUNHY34q7vVnBNDxv9urdvqRAVjUArDUtJjGfyxT34ZOUizrn6di1CUPjhyM5N9O8amV54vgjX3Yitieh4RYoy5kwewR1z/8aVD77Y4iNsWjuN2Y148tc92PauYezkS5sxIkWkMurcXqzd8S2njuyjffe+WoejqENJTjYX9O+odRghJVx3I7Ym/DaqKyKP5MQ4Hr6mH9/Mf44G9g4omhFzVSU/fzyLV353iUpsFT555rZhbFk+E4vZ7P9khUIR9qikqpVyfv8ujOoh2Ldhpf+TFSHFXFnBt2/OYPakYeh06ldM4Ru9Xscj153Bps/e0ToUhUIRAtTyXyvmjlFn8fInP7B7vZ3+F1zV4Lm+dtcYdCIkA0yjiR8/msNff3smHdpFlsO9Qht+078bO37dwp5N39BvyEVahxORKP1ShAsqqWrl/OGGIcz+7Cd2fWPhjIuu83leNNojhBopJT8un82tZxrp36OD1uEoIoi7rjybP7z1b3I7dokqm4VQofRLES6otYko4PfXnsvp5h38svYTrUNptUgp+WHJK4w7Ha4aojbFKhqHTqfj6fEXsP3jmVRVKIdrhSJSUUlVlHDPVWdzttzNjv+p8YxOyooLeWvG3QGPmfGFlJLvF77IxLPjuOycniGKThFttEmK5/mx57B+yStah6KIEPKLTdz459fVqJkwQiVVUcSdlw9iSOxhtn3pc0RjVOE+FLmpSCn5dsEL3DM0iRED1QgaRXD07NSOcQPj+XnVh1qHoogA3AcjK8IDlVRFGRMuHcBFKSfZ+vl7WoeiKU0diuxOUV4Oq1+dxv3D0rjgjK7NEKUiGrn2/L4k5/zE4V82ah2KIoxRg5HDE5VURSFjLurPqIxC1rz+WKvv3/C1xNeUocjuFJ46zu5lzzH//osZenrnUIasiHKEEPz9juEcX7OAirJSrcOJWgxxifyaq+1//4aW99Rg5PBEJVVRyg0X9OW18Wfx3ZuPcXzfNq3DaTa8LfE5q1Tjzq0dityYatUvq5eT88XLvD7lEuJjY5olbkV0I4Tg5Ukj+PHdv6jESiP6X3AFq+uMo2ppfC3vOatUt5/rGIJ8+7mJqloVJqikKorJSE3mg2kjsW9ewk9fLGx17ut1l/hysg/x1oy7+e6T+T6HIjeExWxm3by/Mth4kJd/dwmxMcaW+DEUUUpqSgJP33IOPy6f1ep+NyMBIQRazkKou7y3LzvXVbVqaDCyQltUUhXlGA16nrrtQsZ2K2L1G09QUpCndUgeBLNDr+4S32dzn8Jwagc/r/uMpdurGD77mOtr6fYq9m/1LUhH92zjmzkP8cJvT2PCpQOC+ZEUioA5rUsGU4elsemjOVqHEpXYg0xmg9mdV3d575FZH7qqVuu27GPRDjODZ+e6vhbtMLNuy76g4lUEjzL/VCCE4JKzezKgezpPLHmOhP4jOf3Ca7QOC/Bcvrt03JSAr3NWqZ4c41jiu2VQEu/P3sicW0/jsdVmJjz/MUlt/E+grygrZePy2ZyTWsGih65QY2cULc4FZ3Tlu10bObhpDb2HqOHcLckJU3DXuy/fTR9/ecDXOatUy25xOL+PG5TAnNkH+WB8Jx5bvYkP/zmNdm0SgwtO0SyofyEULjJSk5k7ZSRDxC5WvvQAp7IPaBpPMDv0nFUq5xJfvLWMcQMMbPq1NKClPiklW1YuY8t7f+Hv13bhjzedrxIqhWY8fNNQzNs/o+DUCa1DiSoSk9s0+dpgdufVXd4T1kpuHWBg/a+VapkvzFH/SijqMXbEGSyefhnm795i9RtPknf8sCZxBLNDb//W711LfMNmZjN89lFW7EkKejIAAA2+SURBVLOx/mCp38b0w7u38eUrf+CGzBO8N+1yundIC9WPpFA0mb/dPpxdy16gOP+U1qF
EDWYRQ15RWZOuDWZ3nvvy3rkzTzFk9ilW7LOx7kC5akoPc4QmDZDrZ6quywihqLSC55ZvpCCxN2ddMZ6EFhpEWlZcyAePjmXRmDa0SzRSUG7h1qUlTHh+aUDLdu6sWTyXvic/YerwdNexWd/ms6/jDR5Livt//pHD65Yw8vQ07rhsIHGxqhE9ZLQfAL0v0bLvN3Ts+VxSeEiTR1dUVTN+5jdc+eCLqnLaAuzasIpb2+1jSP/ujbouv9jELQ+/yrJbkklPMpBvsnLLsrImLdu9tHAlHN/M9Itqq2YvfVMCnc9r1JKiIgh0Rjj/3oD0K6ieKiHEM8BowA7kAhOllKo+3YpITUngH3ddzL7sXP71/gyq0/pw2gVZtO/avONY6i7fue/Qa0xvFTiqVltzq1haZ3t0Us73XDpuCns2fc3xH/7NZf3T+PuDl2Aw6EP2cyjCm0jTsIS4GJ66cQAvLnmVC2/9g9bhtHriklLZdbS40UlVQ7vzGpsIrduyjxO5ZhbtyPU43unUPpVUhSFBVaqEEClSytKaPz8AnCGlvNfvhapSFbEcOp7PvzccZMORcroPv4meZ56LMSY25M954+HbMOVm1zuelNmNe/7xQdD3rzCV8cv/lmA6uourB2Zw22VnIUTrKKSEJWFaqWqShmlYqXIyZ8UW9icM4oyLrtM0jmhg74JH+NddFzXqmusemsWJ3Px6xztlpvOfF6eGKjRFS9FSlSqnGNWQCKhkqRUzdMps8svMgKOR++tvv6e80kxcQiL3/+NdOnY/LWSJSSgSp7pYrRay9+7gwNoldEuChy4/g9Nvvizkz1FEDpGqYfdlncv0N1dR0Occ2nVUI5IC4bmp4zCZ6vdHJSUl8+isxT6vO1FqafSzVOIUvQRtqSCEeBa4HSgBLgk6IkXYkl9m5szfvVjv+PbXp9Fu50J++KIUc3wGp104ms69+qHTa7+MVlZcSO6RPRze8BUpwsRFfdN56vfDiDEqNxGFg0jVsOfvvJgJL/2dix94FYNROfv7w2Qqo9ekmfWOH5p3f4PXxae0a66QFK0Qv/+yCCFWAR28fDRDSvmplHIGMEMI8SgwFXjSx30mA5MB3nh4DJNHD2t61IqwQq/XM3X0EABKyytZ8vVHbFpTQgUxxGf2pPf5V5GQlEJCckqzx1JWXMiJ3ZsoPn6QqrwjdEmWnNMjnacnn6sSqSglFBrmoV9PTWXyZX2bM+SAiDEaeGb8+Tz59l+5+HfPhMVLTGtEn9KePUdOcXr39lqHoogA/P4rI6UMdH1kEfA5PpIqKeWbwJuA6qlqxaQkxjP56nOZDFRbrJwsKOXTDW+TnV9GpS6RkmpBUvseJKV3pOuA/wNAbzAQG58Q0P1tVivmygoATCWFHN26DpCYThwgKUaQEWvhkn4dGXZNZ5ITuqLXqx1S0U4oNMxDv8Kgp8pJ3y7p3DakiM//t4xzrhyndTitkox+Q9mw5z8qqVIERLC7//pIKffXfHsdsCf4kBSthRijge4d0njg+lqfJ1OFGVOlmS0Hd7N91XcA5JVUYiIBfQBv2lWmEvq0T0AIQYJex+MX9sGg15OaPETN4lM0mtagYdf8pg8/L/mOo/sG0LXvQK3DaXV0P30g235cpHUYiggh2PWQ54UQ/XBsRz4C+N/5p4hqkhJiSUqI5ep2KVw9tI/W4SgUrULDHr7p/5gy5y3apD9BSlq6/wsUASOEINdUrXUYiggh2N1/N4YqEEX4k54cy863HvJ6XKGIRFqLhhkMep4adz5/WPRPRkx6GmOs+p2sS1JSstem9KQADI2NyRnNEZKiFaIc1RUKRfMTpj5VTSKMeqrqsmXvUV7bbGfYLfdpHUqrYud3X3B18n4uP+80rUNRaEEjfKpUF69CoVC0Es7t15WL0grZs2mt1qG0Ktp26s2xvKbNAFREFyqpUigUilbExMsHYd2+glPZB7QOpdXQsUcfNh4u9X+iIupRSZVCoVC0InQ6Hc9OGMaOf8922Y8ogkOn02Ey29CkXUYRUaikSqFQKFoZSQmx/Gv8eXy3sP4EBEXTMKZ2ptpi1ToMRZijkiqFQqFohXRtn8pt5yTz88qlWofSKtDHJ5OdW6x1GIowRyVVCoVC0UrJ+k0fknI3c3TPFq1DiXh6n38Vq7cf1ToMRZijkiqFQqFoxTx3x0Xs+++7VJjU7rVgEDoddrvqqVI0jEqqFAqFohUjhGDOPcNZ/84TqtE6CNq0y2DLkRKtw1CEOSqpUigUilZOWkoi943qyw9LXlGJVRMxGIzEJSRpHYYizFFJlUKhUEQBIwZ245Z+sHXlMq1DiVhUQqrwh0qqFAqFIkq4ckgfOpdt5+CWr7UOJSIptsZQYqrUOgxFGKOSKoVCoYgShBA8PvYC8n/8mLLiAq3DiTiS0jtiVl5VigZQSZVCoVBEGa/cPYJt7z9DSUGe1qEoFK0KlVQpFApFlJGUEMvrU0awffEzlBUXah2OQtFqUEmVQqFQRCEJcTH8bewQNi17FbvNpnU4CkWrQCVVCoVCEaV065DGjKu68/XbT2Gzql4hhSJYVFKlUCgUUcyAnu35yzU9+XreX7FaLVqHE9aYcn4lOSFW6zAUYUxIkiohxB+FEFIIkR6K+ykUCkVLEu0a1r97Jk9ffxrfvPUEVku11uGELanxeuJjY7QOQxHGBJ1UCSG6AqOA7ODDUSgUipZFaZiDPl0y+PtN/Vn35l+wmM1ahxN22G02qioqtA5DEeaEolL1MvAwoKxmFQpFJKI0rIaeHdvxr7ED+fqtx6k2V2kdTlhRlJfDoK6JWoehCHOCSqqEENcBx6WUP4coHoVCoWgxlIbVp2v7VF4efw4b5v6BY3u3ah1O2HBww38ZOaib1mEowhyDvxOEEKuADl4+mgE8BlweyIOEEJOByQBvPD6JyTdd1ogwFQpFRBObotmjQ6FhHvr17J+YfM15IY0x3OiUmMmCJ07jz2/9B2Pv02iT2k7rkDQnzl5J375DwKDXOhRFS6P3myq5EE0dECmEGAisBpyLzF2AE8BQKWVOk24aAoQQk6WUb2r1/HCLA8InlnCJA8InlnCJA8IrlpYgHDUsnP4OwiUWFUd9wiWWcIkDwieWJidV9W4kxGFgsJQyPyQ3bHocP0kpB2sZQzjFAeETS7jEAeETS7jEAeEVixaEg4aF099BuMSi4qhPuMQSLnFA+MSifKoUCoVCoVAoQkDgC4V+kFL2CNW9FAqFoqVRGqZQKIKlNVaqNF9TrSFc4oDwiSVc4oDwiSVc4oDwiiVaCae/g3CJRcVRn3CJJVzigDCJJWQ9VQqFQqFQKBTRTGusVCkUCoVCoVC0OK0uqRJCPCOE2C6E2CaEWCmE6KRhLP8UQuypiecTIURbjeK4WQixUwhhF0JosjtCCHGlEGKvEOKAEOLPWsRQE8c7QohcIcQvWsVQE0dXIcRaIcTumr+bBzWKI04IsVEI8XNNHE9pEYeilnDRsHDRr5pYNNUwpV/14ggL/aqJJaw0rNUt/wkhUqSUpTV/fgA4Q0p5r0axXA6skVJahRAvAEgpH9
Egjv6AHXgD+KOU8qcWfr4e2IdjvtoxYBMwTkq5qyXjqInlIsAELJBSDmjp57vF0RHoKKXcIoRIBjYD17f0fxMhhAASpZQmIYQR+A54UEq5oSXjUNQSLhoWLvpVE4tmGqb0y2scYaFfNbGElYa1ukqVU4xqSETDeV5SypVSSmvNtxtwmAtqEcduKeVeLZ5dw1DggJTykJSyGlgCjNYiECnlN0ChFs+uE8dJKeWWmj+XAbuBzhrEIaWUpppvjTVfretNK8IIFw0LF/2qiUVLDVP6VT+OsNCvmueHlYa1uqQKQAjxrBDiKDAeeELreGq4C/iv1kFoRGfgqNv3x9DoFzAcEUL0AM4BftTo+XohxDYgF/iflFKTOBS1hKGGKf2qRemXG1rrV00MYaNhEZlUCSFWCSF+8fI1GkBKOUNK2RVYCEzVMpaac2YA1pp4NItDQ4SXY6oaAgghkoCPgGl1KhQthpTSJqU8G0clYqgQQrNlhWghXDQsXPQr0Fg0QumXD8JBvyC8NCxk5p8tiZQy0GnMi4DPgSe1ikUIcQeQBYyUzdjA1oj/JlpwDOjq9r1zxlpUU7P+/xGwUEr5sdbxSCmLhRDrgCsBTRthWzvhomHhol+BxKIhSr+8EG76BeGhYRFZqWoIIUQft2+vA/ZoGMuVwCPAdVLKCn/nt2I2AX2EED2FEDHAWOA/GsekKTXNlW8Du6WUL2kYR4ZzV5cQIh64DA1/ZxTho2FKv1wo/apDuOhXTSxhpWGtcfffR0A/HDtFjgD3SimPaxTLASAWKKg5tEGjXTw3ADOBDKAY2CalvKKFY7gaeAXQA+9IKZ9tyee7xbEYuBhIB04BT0op39YgjguBb4EdOP5fBXhMSvlFC8dxFvAejr8XHbBMSvl0S8ag8CRcNCxc9KsmFk01TOlXvTjCQr9qYgkrDWt1SZVCoVAoFAqFFrS65T+FQqFQKBQKLVBJlUKhUCgUCkUIUEmVQqFQKBQKRQhQSZVCoVAoFApFCFBJlUKhUCgUCkUIUEmVQqFQKBQKRQhQSZVCoVAoFApFCFBJlUKhUCgUCkUI+H/sNw8d2q6tqwAAAABJRU5ErkJggg==\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAlUAAAHiCAYAAADBITniAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzsnXd4VFX6gN8zJXXSQ6ihI6IoCoq9oaggiIqCWNF1cVnBdXVlLT9d3dW1rQgCunZFBUGaCLKIClhQqtKk11BCMumTZPr5/TGFmWSSzGQmmZTzPs88T2buvWfOjM7Ld879zneElBKFQqFQKBQKRXhoot0BhUKhUCgUipaACqoUCoVCoVAoIoAKqhQKhUKhUCgigAqqFAqFQqFQKCKACqoUCoVCoVAoIoAKqhQKhUKhUCgigAqqWjhCiNuFEF/X89rtQojLI9ylJo8QYpkQ4u5o90OhUEQGIcTlQogj0e6HouWjgqomhBDioBDiqki2KaX8VEp5dRDv/aEQ4rkq154upVwVyvsJIboKIaQQwuR+HBRCPBZit6OKlHKIlPKjaPdDoWjJuN1Q6fZErttBhmj3K1zc/iv3cWBxI7+/CiCjiAqqFA1FqpTSANwMPCWEGBzpNxBC6CLdpkKhaFSGuz1xFnA28HiU+xMp+kkpDe5HaqgXK7c1X1RQ1UwQQvxRCLFXCFEohFgshOjgc+xqIcQuIUSJEOINIcRqIcR97mNjhRA/uv8WQojXhBB57nO3CCH6CiHGAbcDk9wjqy/d53tnzoQQWiHEE0KIfUKIMiHERiFEdl39llJuALbjkqanvx2EEPOFEPlCiANCiAd9jsULIT4SQhQJIXYIISb5jrrcffq7EGILUC6E0NXR3kAhxAYhRKkQ4oQQYrL79TghxCdCiAIhRLEQYr0Qoq372Cqf708jhPg/IcQh9/c2UwiR4j7mmZW7WwhxWAhhFEI8GfJ/XIWilSOlzAWW4++J64QQv7p/uzlCiGd8jtX623N75EO3R34HzvV9PyFEH/fvvFi40hyu9zn2odujy9w+/EkI0U4IMcXd3k4hxNn1+Zx1eFwKIR4QQuwB9rhfO1UIscJ9/i4hxCif84cKIX53+/ioEOJvQohEYBnQQZycKetQrSOKhkNKqR5N5AEcBK4K8PogwAj0B2KBacD37mOZQClwE6AD/gLYgPvcx8cCP7r/vgbYCKQCAugDtHcf+xB4rqb+AI8CW4He7mv7ARkB+toVkIDO/fx8oAK40f1c4+7D00AM0B3YD1zjPv4isBpIAzoBW4AjVfr0G5ANxAfR3s/Ane6/DcD57r/vB74EEgAtMABIdh9b5fP93QvsdbdrABYAH1f5rO+4+9IPsAB9ov3/knqoR1N/VPFLJ7dfpvocvxw4w/0bPxM4AdzgPlbrb8/tkR+AdLcrtnk8Aujdv+kn3M4YBJQBvd3HP8Tl2wFAHPAdcAC4y+2K54CVtXwuCfQM8HqNHve5boW7z/FAIpAD3IPL7f3d15/uPv84cIn77zSgv8/3diSY/wbqEfmHmqlqHtwOvC+l3CSltOCaIr9ACNEVGApsl1IukFLagdeB3BrasQFJwKmAkFLukFIeD7IP9wH/J6XcJV1sllIW1HK+UQhRiSuoeQNY5H79XKCNlPKfUkqrlHI/LjHe6j4+Cvi3lLJISnnE/Xmq8rqUMkdKWRlEezagpxAiU0ppklL+4vN6Bi75OaSUG6WUpQHe63ZgspRyv5TShOu7v1X4T88/K6WslFJuBjbjErxCoaibRUKIMlzBQx7wD88BKeUqKeVWKaVTSrkFmA1cVuX6mn57o4DnpZSFUsoc/D1yPq4B0otuZ3wHLAHG+Jyz0O0EM7AQMEspZ0opHcAcXLcqa2OTexasWAjhee/aPO7hBXefK4FhwEEp5QdSSruUchMwH1dKBbgcdpoQItnty0119EnRCKigqnnQATjkeeL+x70A6Og+luNzTAIBkxTd8pgOzABOCCHeFkIkB9mHbGBfCH3OxCWuv+EaOendr3fBNTXtEU4xrhFjW/dxv89T5e9Ar9XV3h+AU4Cd7lt8w9yvf4zrdsNnQohjQoiXhRB6quP33bv/1vm0D/5BbIX7cysUirq5QUqZhMsRp+LyBgBCiPOEECvdt/VLgD/5HndT02+vqkd8f8MdgBwppbPK8Y4+z0/4/F0Z4Hldv/H+UspU98OTjlCbxz1Uddt5Vdx2O9DOfXwkrkH1IeFK+bigjj4pGgEVVDUPjuH6gQHgvm+eARzFNQXcyeeY8H1eFSnl61LKAcDpuIKNRz2H6uhDDtAjlE67Z4BeBczAn33aOeAjnFQpZZKUcqj7uN/nwRXMVWu6Sr9qbE9KuUdKOQbIAl4C5gkhEqWUNinls1LK04ALcY0K7wrwXn7fPdAZsOMvWYVC
EQZSytW4brv9x+flWcBiIFtKmQL8F1fqQTAcx98dnX3+PgZkCyE0VY4fDbHboVKbxz1UddvqKm4zSCnHA0gp10spR+By2yJgboA2FI2MCqqaHnp3ErXnocMll3uEEGcJIWKBfwNrpZQHgaXAGUKIG9znPsDJkYwfQohz3aM/PVCOK9hxuA+fwJU3VBPvAv8SQvQSLs4UQmQE+ZlexJUEHwesA0qFK9k8XrgS4PsKITyJpHOBx4UQaUKIjsCEOtqutT0hxB1CiDbuUalnabNDCHGFEOIMIYQWV06azee78GU28FchRDfhWu79b2CO+1arQqGIHFOAwUIIT7J6ElAopTQLIQYCt4XQlq9HOgETfY6txeW/SUIIvXDV4hsOfBb2J6id2jweiCXAKUKIO9391Lsd3kcIESNcNQhTpJQ2XA7zdXmGcC+oUTQuKqhqenyFa3rZ83hGSvkt8BSu++nHcc0Y3QogpTQCtwAv45pKPg3YgCtpsyrJuPKNinBNQxdwcmT4Hq7788VCiEUBrp2MS1Rf4/oBv4crmTIYlrrf84/unIThuFb5HMCVePku4BHAP3HdvjwAfAPMq+GzAK7ZsDrauxbYLoQwAVOBW915Eu3cbZcCO3Alx38S4C3ex3Wr8Ht3+2b8Ba1QKCKAlDIfmInLdeCa3f6nO+fqaU7OxATDs7gcdwCXsz72eR8rcD0wBJcv3gDuklLuDPcz1EZtHq/h/DLgavc5x3Dd6nwJV5I7wJ3AQSFEKa5bo3e4r9uJazC43+1ztfqvERGuFBxFS8E9pX0EuF1KuTLa/QkXIcR4XIFQ1QRVhUKhUCiaFGqmqgUghLhGCJHqnlJ+AlfewS91XNYkEUK0F0JcJFz1oXoDj+BafaNQKBQKRZNGVW1tGVyA6359DPA7rhU1ldHtUr2JAd4CuuHKgfoM1/S8QqFQKBRNGnX7T6FQKBQKhSICqNt/CoVCoVAoFBFABVUKhUKhUCgUESA6OVVrpql7jgpFK6HSYmW3OYN+Q8YGW7ixabNzqaRwf7R7oWhk/vTBRgbc+XS0u6GIAjqt4J6LugXlLzVTpVAoGgQpJV+s2cXdb/7Mb9aOdV+gUCgUzRy1+k+hUEScb389wHvf7iLjrCsZPOFherQPdotJhUKhaL6ooEqhUESMPUfymbZ0K/b2Z3HJhMnodIH2qFYoFIqWiQqqFApF2FSYrbw49xcO0oEBo58hwZAU7S4pFApFo9NkgiongnJtOg5dHMFvRN6YSLR2M4mOQjRqE3CFAnDlTc1dvY05G/Ppf9MELsnuFu0uRYWm7y9QDlMoGp4mE1SVa9PRG1IxCAeiCTpJSrDIOMpNkOQoiHZ3FIqos3zjfj5YvZ8OA6/j2gevRDTFH24j0dT9BcphCkVjEHZQJYSIA77HtXO2DpgnpfxHqO04dHFNWkhCQCwOzLo4cES7NwpF9Ni48zBTvtpOSu8LuPLBidHuTthEwmFN3V+gHKZQNAaRmKmyAIOklCYhhB74UQixTEoZ4oa+okkLCXD3r4l3UqFoII4bS5iyeBP5ib244P5X0cfGRrtLkSICDmv6/gLlMIWioQk7qJKuzQNN7qd696PZ3rD/3w8b+csL7+JwOLjv5qt57I83R7tLCkVUqbRYmf7lRraWJXPG0Efpmdk22l2KKC3JYcpfCkV0iUjxTyGEVgjxG5AHrJBSro1Eu42Nw+HggefeYtlb/+D3L2cw+6vv+X3v4Wh3S6GIClJKlq3bzZ3Tf6Cszy1ccuckUltYQOWhJThM+UuhiD4RSVSXUjqAs4QQqcBCIURfKeU233OEEOOAcQBvTRrNuBEX1fv9Bt7xJMaSymqvZ6bEs+6T5+vd7rqte+jZuT3ds9sBcOuQS/jiu7Wc1rNzvdtUKJojKzcf5L1vd5Fy+mVc/eBDLT4JvS6H+fnr2QmMu+qUer+X8pdC0XKJ6Oo/KWWxEGIVcC2wrcqxt4G3gbD3/jOWVHL6/a9Ve337W38Np1mOniggu12m93mndpms3bIrrDYViubE7pw8pizZgmx3JpdMmIJG07p2sqrJYX7+CnPvP+UvhaLlEonVf20Am1tG8cBVwEth9ywKuFIr/BEqqVPRCjBVWHhl/lr2O9oy8PZ/EZdgiHaXGo2W4jDlL4Ui+kRipqo98JEQQosrR2uulHJJBNptdDq1yyQn1+h9fiTXSIes9Cj2SKFoWJxOJwt+3MlnG/I464Y/cWnnHtHuUjRoEQ5T/lIook8kVv9tAc6OQF+izrl9e7Hn0DEOHMmlY1YGny37gVkv/y3a3VIoGoQfth1i2v920/nC4QyecEWru9XnoaU4TPlLoYg+TaaielNAp9My/cn7ueaPz+BwOrn3xqs4vZdK8lS0LDbsOsK0r7YR27U/V/3ltRafhN5aUP5SKKJPswyqMlPiAyZ1ZqbEh9320MvOYehl54TdjkLR1DhuLGH60s3kxnbl3D++TGxc+L8XRegofykULZdmGVSFs+xYoWhtWKw2Zny5kY2FCZw1/GG6ttBaU80F5S+FouXSLIMqhUIRHN9s3MMb3x6kz5B7uLxPv2h3R6FQKFo0KqhSKFogP27P4c3lO0g+9SKueejBVpuErlAoFI2JCqoUihbEnpw8Xl64CU2HM7ls4hSVhK5QKBSNiAqqFIoWQGl5JVMWbWSPLYPz7nmJ2PiEaHdJoVAoWh3qnoCiTozFJkY+9l8KSsqj3RVFFex2B/O+38F9b69Fc+F9XHrH31RApVD4oPylaExUUOXDvU9OJeviO+l7/YRod6VJMXPpGopyc/hoyU/R7orCh59/z2HMa9+wLuYcrnzgJdpmt8pq6Ao3yl+BUf5SNCYqqPJh7I1X8r+3n4l2N5oUxmITS1av582bMlmyer0a7TUBNuw+xr1Tl/Pe7zqu+ssUep1zuUpEVyh/BUD5S9HYNGsTG4tKGTnhnxQUl0akvUvP6Ut6SuvZSDYYZi5dw7CeGnpnxTKsp0aN9qJIXlEZj3/4Pf/daGHAfS9z3g33qWCqGaP81fAofykam2Zt5JkLllN0dC8fzV8e7a60SDyjvLv6JwJwV/9ENdqLAna7g6kL1zJh9i7ShzzCebdMICY2LtrdUoSJ8lfDovyliAbNdvWfsaiUJStW8uZNbRm/dCV3j7yGjNTkaHerReEZ5WUaXP+bZBp03tHew7dfXeN1xmIT97/4CW8/ficZKYmN1d1aGTh+BsYyS7XXM5NiWffmA1HoUXCs/HUf01bs5dRrxzJ4eP9od0cRIZS/Gp6W5K8XJozBZCqr9rrBkMTj02dHoUeKmmi2QdXMBcsZ1kPQu20cw3pU8NH85Tz8h1ui3a0WxapNuzmWZ2HW1jy/1zuc2F2rlHwTQ2s7rzExllk4/Y+vVnt9+zuPRKE3dbNh9zEmL9lG6mmXcPWDf0ara7Y/VUUAlL8anpbkL5OpjO73Tav2+v53J0ahN4raaJam9ozy5o5KAeCuAcmMmqtGe5Fm8auhryLyTQwdv2Q9dw+7qMmM9poDe4/k8/KCTci2p3K5Kt7ZIlH+ahyUvxTRoFnmVHl
GeX7Tuj1E2LkJY/72CheMmcSug0fpdMU9vDf/60h0t1WhEkPrR3FZBS989hPPfJ3P6Xc9z8CRf1YBVQtF+avpovylCJdmOVO1at1mjh23MGvrcb/XOxg3hzWFPvs/j4bbtVaNZ5Q3d1QS4EoMHTVXjfZqw2538NE3W/hmv42+Q//IZZ26RbtLigZG+atpovyliATNMqha/NZz0e5CjTTFJMfGor6Joa2VDbuO8NLibXS99BYGjbs02t1RNBLKX00T5S9FJAg7qBJCZAMzgXaAE3hbSjk13HabK00xybGxqG9iqC8NtUovMyk2YFJ6ZlJsvdusL7/uzWX6sq3Q7nSunPiaSkKPMsphJ1H+Cs9fDbVKz2BICpiUbjAk1btNRcMQCZvbgUeklJuEEEnARiHECinl7xFou1nR2pMc65MYWpWGWqXXFMomnCgs5ZUFGymKz+ac+15Gp4+JdpcULpTDUP6KhL8aapWeKpvQfAg7UV1KeVxKucn9dxmwA+hYj5aQMtzeNCyu/tXcyVCTHCO50WdDbhqqNiQND5vdwfQv1jNx9i7aDf8754/+iwqomhCRcVjT9xfU7rD6JGlHyg3KX4qWQkRX/wkhugJnA2sDHBsnhNgghNjw9hfVf6xauxmL1DZZMUkJFqlFazcHPF6f6r2R3Ogz2LbqIxi1IamLgeNn0P2OydUeA8fPqPGa1VsOMua1b8ntOpzB9z9DSkabRuyxIlRqcpifv+b+r9p1Td1fULvD6lt9PFJuCKWdUB2m/HWSFyaM4cmxw6o9XpgwJtpdazFELJlDCGEA5gMPSSmrbWYlpXwbeBuANdOqqSfRUUi5Ccy6OKApLiWXaO1lJDoKAx4NNckxklPtobQVas6Eb9vjvljHd5t289HT97Sq2wIeQrk1+fuhE/xz7q9knnk5V074o5qZagbU5jA/f+1cKinc73dt0/cX1Oaw+iRpR8phobYTisOUv/xRRUQbnogEVUIIPS4ZfSqlXFCfNjRIkhwF4IhEjxqfUJMc/afazWElhgbbVn0k6Nv2ZR2L+HzroVaZxBose47kM2Xxb5iSunLFg1PQaLXR7pIiCMJ1WGvzF0TOYaG0U58ATPlL0ZhEYvWfAN4DdkgpJ4ffpeZJKEmOkayHEkpboUrQt22b3cnQbg6+2ytZ+O3aoPsa6mq+prRKLxSKyyp4a9lmtpcn03/MsyQmpUS7S4ogUQ4LPUk7Ug4LtZ36BGDh+AtOOsxYVMqCb9d5Xw+0ok+t0lNEIqfqIuBOYJAQ4jf3Y2gE2m2x1DbV3lBt1Tfny9N2QYmJrmlabjpVT6beHHRfPbfMqj6qBlqefKWqr2cmxbL/k4ebxOq9QEinkzJTOeM+/A3bwD9wyZ2TVEDV/FAOC5FIOSyUdkJ1WCT8BScdljHgWrrfN8378C2d4MlVqlpOwWBI4vkPl6jVe62IsGeqpJQ/0nSTCJokkaiHEmpb9cmZ8LT9yeYT5BeV0iZBg0YDbRJ1LFkd3KjU4XCw5dPnOfXGB4lJqHm01pw2PP7pnX9gNVfisFmwV5TilJJVazezYfsTSp7NEOWw0ImUw0JpJ1SHRcJfxmITxcYTWCuq157ypTnlKm1/9xEc5gq/12ymQl6YMEb5KwKoqoNRINx6KL5Vj4NtKxh5Va2m7Gl78qdfw9GNPHzpyRmYyd+XBJWbYK0w0VFbyomNX5N9ychQPmaTw3Nr0nSiiNiuZxGX0pbEPheji4mjXXb3JilQhaIhCMdh9fEX1O2whvDXzKVr6GKwcWJjy9hH0WBIoujgPtrd6l/VX6vVYlr+WpR61bJQQVUzpD5Vj4ORV03t1ndUaiw2Eeso5/nrujJx6fdYB1xd62xVU2fxc3fy4rx1vPflGk4b8wRCo5LQFYpQqW/V9roc1hD+WrJ6Pc9ekcDffv4eZ6fzgu5rU+Xx6bN5cuwwOnbtVe3Y/gDnK0JHBVXNjIaqelxbu/Udlc5cuoYRp2rpnhnHiF6VrGims1Vmi40PVmxm9SEn597yJPE/jFUBlUJRD5qbv4b11NAtTcuIXoIP9+8Nu5+Klk9Ei38qGp76VD2uC2OxiasfnMKQ7iJi7XokN7pvHOb8HIZ3KqX4l3ls+e9DbH/nkSa/mg9ASsmCH3dyz1u/cKzLcAaNe4bk9Mxod0uhaLY0N3/d1T8RnVbD8E6lWI9sZe9b49n/7kT2vztRrehTBETNVDUjIlmKwZc35q2itKiAC7MSsDuS69Vu1XwGjzwvPLWt95wHC0qgY7+AU+5NrZTCvqNGnpy1nnb9r2bQn5tesrxC0dxoaH8N6ZbJ3iP53HZWKrfNi4y/Mg06Mg1t6AOc0wWyrhjBoDHjq12vSikoPKigqhlRnxV8dWEsNjF7+RpGn64jLdZJXlEZHTJTQm63aj5DqHkMTaVkwp4jRl5fupmimPZcNmFywEroZYX5bHxxdLXXdZqaF5A11O71CkVzoSH9dd+AGLBX4HACtsoG8VdOoZ3sX38KGFQ1p99wffwFymHBooKqZkQkSzF4mLl0DTqnlZmbbSzeZaOwspK2GWY0GhF0u4HyGWrLYwi1IGhjUFZuZtqXm9hVbqD/qGdISEqu8dyk9DYhL59uTkuuFYqGoKH8FYuVudsk034xkx6vodhSQZu0ZDpF2F9/+mAjA+58utkHF/XxFyiHBYsKqpoR4ZZiqIpHJqPONJAoLEw8P5bHVlSQ2udcnrp3WNDthFypvRFrUtUVwEkpmb1yK/M3F3H2jeO5uFO3iPdBoVA0nL++ub8zMzeU4LCUc31vPYsPxqPNPiekWapQ/NXYwUVzD+JaGyqoasXMXLqGy7Nh5e5y5t6cgF4juPfsGG5f/DN/HnlFUPkIDZUnEQkGjp/B5gP5tK9Sk0WnFRiXv8SWvUf596JttD9/BNdMGIRrtxKFQtEc8ARDAEu2lzL35gTsUjK0h2TiiuCLezZVf3mCqSJjnl9dKa1Wq+riNWFUUNWKWbVpN9v2lTKiJ+RVOMircCAEDO7iCDofIVJ5Ej+98w/KjaV0v8N/67VwbgkayyzoDWnEt+nk93rpvt8oLShi+hYdF//pJWLj4uvVfqRRI1KFIng8txOnryn2OgwgRmdjWM/YoIt7RirPqzg/lyfHVp/hr+/v1zMjtmX6eGIzO3tftxgPh9xWY6D85UIFVS2AqitXgmXxqxO4/pHp/HDCyA9fgd3h5HhhOe3TE+lsDy4fIVJ5ElZzJe1vfY7Tu7X1e72uW4K13d6rit1UROGGJdjN5ViknkULFrBowQK/c6IpAJWzoGit1MdhntuJHoet/PKkv3RaS1AOimSelxSakH+/tQUitZGbs58iY161IE75K/qooKoFUN8KxeCf5zD5069ZsmI1wwZfHNFK7Q1JMPlZTpuFku2rsRQcI2XgjTjMJir2rK23AOqzfFotuVYoaiYSDmuO/qpvIOJwONAb0qtd21D+Cue61oYKqpo5NVUSDnXkF2yl4/rOivkSqCaVzVSKThvZnCYpJXZzOSe++5CkMwZj6DcEAIfZFPD8YEd/9RkJtqbpb4UiFAK5R0
rZIP7ynBuOw2oKLjQiurW0G9Jf4VzX2lBBVTOnppUroY78gl0BE86I0kOgHKnud0ymT+eserUXCJvNRlFJGSIuFcvRXViP7T55zFSIkM5q14Qz+lMoFPUjkHuABvGX59xwHFZTcBEonyoSaOMSOPbhQ97nNlMhAHGZ/rmiyl9NA7VNTTPGdysFcK1cWbJ6PbsP53lHbUtWr6egpLxe7VS9znc0WFu7xmITwx6ZzvBHZtT53pGmoiiP7ctncTyvAItTg91cjr28CJupEJupEEd5Mdlde5Dapl2j9kuhUFQnkHu++G4dC79dG3F/+Z5bV9u7Dp2g2w1PsCcnL+DxhqY4P5fi/Fw2vjgas/FINX+lZWZx+n3V0x4U0UfNVDVjalq58vfpnwcctdU07R3sCphQZrP27T9EapwIejQY7jY1dquZ/b8sx2yx0ubSu8jZtIoBj82pdt7+dyd6d2pvaqicBUVrI5B7LutoZesJB72zMiLqL99z63LYYzPmka6rZNK0z1n4cnCrjyP5+5VCo/zVTFFBVTMm0MoVp1OSV1TA42MSsTuS/Oqu1DTtHcwKmGDruRiLTSz4di2psU4eu1DPK9/8ElTNl/qWTchMimXtlPGUmy3EJmWgi4nl8MHNdeY3eARQUmDE6b4V6HQ6EMCW6ePRxiU0+khQ5SwoWhtV3eN0SvKLyjitbUy1ffzC8RcE77Bdh06wdec+pl8bx4T/7WNPTh69sutOTajP77e++VnKX02XiARVQoj3gWFAnpSybyTaVNRNoJUrkz/9muJdP5Kis2MsNtEuI5lhPTW8MW8lK9duDpjIGcwKmFBms9rozfTtruWUdMEFWRVh5V9BzWUTEnRwRt/TuPSOGzhl4CBenHibd3myUzrZMt21R1cgwXgE8OTYYd4chKMH93jrwfjmMChaNspf0aOqeyZ/+jUc3chtpwtKSsu8+/iF6y8I3mGPzZjHjb21dEsV3HCKhr++Noclk8PLS6qtdMLzHy6pdo7yV/MlUjNVHwLTgZkRak9RT1as28G2PWV8sV1DYWUFbdIq0WgEdrmJu87UB70VQ1WCnc1a8O1aMFsYOlBP5xQNw3s4eKKG2apg9wCsWjbBXFrIzpULOLx3E/ePe5G4BFe7vsuT6yMYrVbrLaxnMxV6R5ChTF+rAnjNkg9R/moSrNq0myO5Zl5bVUp6vMdhyThFeP7ytF2XwzyzVJNGxNI5RXDTqTpuX7i/xtyqYH/vwZROUP5qGUQkqJJSfi+E6BqJthTBUVN+weCBfRjcsYKHL01h8vcl0HEAd113IaMmTfVL5Kw67V3XMuNgZ7Pa6M30baeha6ogQS/onq6pcbYq1D0AzaVFHFz/DRa7k/ZDJmD56G/egCoStMvu7v3bkpnlHUGGQkMVwFOyaziUv6JDIOcsfnWCd7bK4zBT+hmsWrc5LH952q4L7yxVmoZYnctfI/to+etrc+h0xoXVzm9KBS+bsr+g9ThM5VQ1U6rmFxiLTYz914eUlRQz/9Zk4KR8yi3WOqe9I1EqYdWm3aw7VMkvBx0s/N2KVgMOJxRUSs4y76h3u06Jlqf5AAAgAElEQVSnk0MbvqU4/wSZF96CraSU40cPV6vJUlJg9P4diVFbpIhE9eOmJG+FIhL4Oueu6y7k/hc/4YU/31Qt7+mSt9Zw7wBDg/sLYOOOw/xitbJsjw2NACnBWCFxag4GDKrqS27OfhwOh58Xiox55Obsp1129xbnL2g9Dmu0oEoIMQ4YB/DWpNGMG3FRY711WESi2GWkCVTozrPi7pYz4qvJZ+Z3m9AJZ43T3qEUzquN958ay6AHJnNxmzKmDk1ErxHYnJLHVlSQ2qdHvT7rsnW7yS8sIand6XQYcBMAjsIiYjM7V6vJsvHF0d6/IzFqixSqfkzzx89fz05g3FWnRLlHwdMcHFZutlKUm+NduezrsFisvLOulDnbrX5tRNpfxmITmalJXN/LwJhTrHRP16LXCF75qZL3t2kwm6unKtQXh8NRzWFbpo/H4XDtX6j81XxptKBKSvk28DYAa6bJxnrfcInUCCiS+C4Lvjy7giv+/CopsYKOyRo+2FDGkn0CjeZkdfLObTNrnfoOpXBeXf3SWYr4Yqdk3bGT07ylZknC8U08dW/wy4APHi/g6c82YuhzCQkZ7Uns2Dvk/gRDKMuAW8v0taI6fv7auVRSuD+6HQqBpuywjAQtRUXFfPzVT3x5dxZXvXWAQ6mJzNp6MoDRxRk4Natmh0XSXxpzEe/8ArM2SJLjTjo0FsHu3btoalMByl9ND3X7rxYiNQIKtw++o8yqy4KF04bOXEJKfDxLxnXxy6MKZnQa7DLjuvroufX4wmAD45eUY9clotO6lgUnx0KHrPSg2qosKcBYWMQz3xQw4J7nSDAk8b9F8/3EUWTMQ29IRxuX4Het02bxm63yoNPUvP2Nmr5WtGSausNmbighWe+gqMRGZqKOv16WEVV/fXpnJ65//xjxCfGgPVnWQAccP3a83t9B1eAnkMOsZQUcm/U4uRqt37XKX82LSJVUmA1cDmQKIY4A/5BSvheJtqNJpEZA4fbBd5Tpuyz4eImN5TtLmXxNLA98Vck1/z2EEILynWspt1iDGp2GUjivtjY8tx6v6deeCSWuwM5z/cDxM9h2wkL3Oyb7XZdfWOJNSnc6nZSUmrA5JZkde3LRmL96z6sqDt9lxL5o9LE1FsxrLAKNHIuMedW2lFA0HVqqv6BpO8zmkCzYXMSMobHcvbCSZ5fn8cU2E+3b/Bw1f/XOSuf+C9P9/AUuhxUczq+WV1RWmB/UTFEwDotJyiBj2MN07NrL73Xlr+ZFpFb/jYlEO02JSIyAItUH31Gm77Lg4wVljOwtyIgXdDDA/gILmQka0g0a5n/9M3Nuq3t0GmzhvNr6uODbtWTES4Z2c2B3OKt9V7Wt8tvz0UMs/GkXczaeoN8N42nrk0vQHAk0cnQJNLxCfKpaccPREv0FTd9hr64yMrK3QANc0U3H7N9K6ZgsqDCVNSl/gWulcsaAa2k76F6/6/e/OzGq+U6RpqH8Ba3HYer2Xw1EYgQUqT74jjI9eQW7Dp3guokvcdsZsRRWSorMkrQ4ePxiPROXVTCsdyxaaylDuutr7XPVPAXfqfpg+9hGb+aSrnq6pmn9Co7W9V2ZzWbGvPYtXS4awVUPXIZGo/G771+cn4t0VxbWCA0pGZmA/+jQ9xzprL1gXmMQKG+hpMDIphduqbbfYCgyUTkPilBpyg7z9ZcEzu2gY8EOG/+9Lp6RcysYfqqW7ul6BqSZeGP+yhrzMQPlWXkcVlBSXmfwGI6/asLjAF83wUmHGQxJ3gDD9xyn00HeFy9ToNW1OH9B63GYCqrcVL3vH+4IKBL9qW2U+diMedzWV8f5neN4dmU5nZI1XJit5ez2em7q4yQmRovDZmVoDx0TVwQ/Og0lqdXTx1ibnVnb7HyyxUp+RZm34GhN35W1oowd33yOySq56i+vIcTJnAHf+/5bpo+nw9gpAFiMh73T4r6jw/pWFG6opM3a8hZa0ohW0bQIt
[base64-encoded PNG image data omitted: embedded binary image blob from the patch, not recoverable as text]
9VpL0cQmcXDCd6rJCQKJPTHX0YxnjcE60sVeWunb16OJT6DD+7036md2X+5JEJQs32Vi+y3NOnq/G9rLiQvasX8G0Cy10TTUypqeFaW7VKm/P8ZawAS26S9D7rktFtFFWXsU/VvxIccf/48rp013mtM2J83d+5qTRruQhVDiTGm/6dXj+/Qgv/i+OGaa5rrYCZyXLbqnGnJ+FzWqh4Ms55FgdSVFjNCx7yWO465e02RA1xp9O/QJAZ6DjxH/Vu4dB5/9F2X25L5Zq3tpUyrI6+uWrqT2/2MSnazfxwoU2uqcauLqnlUfWbfKaEPkbPaN2CrYsKqmKGBy/xHV9VMzpmQD08tJAKoWu3hbpE0cOULDyJfQGI0gbne6aDYAl/xjxmd0w52d5OK3XxdmoumP2VGxS0mHCPzDGxHrs5NEnptLxjlewW6rJWfxwo3/SsuJC3v/7g1BykifHO5pTP7ynP7cuK2Hi88sCqhQ5q1Q9UnXEGnT0SNX5rFb52okYd3w9usqikA1P9kdaZkc2rilgwsiQ31oRQRSXVXDfW9/R//oHOatbL/8XRBDe9Ms52L0uzhmmdRO8Lc+PpXOPPo42hCZqWCD6BXDynfpLbVV5Rxqs3uUXm5j0zALKSor5aJxjVuHqe7pxy/Iyr55T3nBWqXqk6okz6OiRqvdZrWpo9Mzt11wQ9gOUWxsqqWoGApnnF6rRNeGKo+ldOlzZrdUOL5oGhMjZDH7tPTN598kpiNJT3DgomXaJjopfYwcX79m8nm8OFvPV7lpz0fwKO5nFX9e73tdOxLVL5tHlyAr+9OFh/nVzr5AMT26IuIQkdDHxIb+vInL48qdDvP3dSYbd+TSJyW38X9AMBDrLz9/4mkjGuWnH+Wd3/ao4eYCE+PrjbPKLTdz5zHscPVVAVXkp4wYlNHlw8f827eGXA2Ws3O0wFrXbIa/CzoCSPfWub2j0zNPzV1JcVIwQKU2KQ9F4VFLVDASSFDV1dE2w6PV6LKZCspc+jpTUOBfjZVaWf3QxCRyfd2fNnEGb2z0EOUseI+2K32MrLyZv8SPEJrWp95bqjrMZ/OPXHqdt5XEqdIJFPxXz74NZHksfgQ4u7j/kYsZ0KapnLrq/44iAf74D277n20OFZBgrGDnrIEnJyY2KQaFoDJ9tPMBnWbFcPu15TeMINCFq6viaYNEJHYfn309Rfm5QGqaLSeDUwhnYK4qREg/9OrVwBjpjHLaK+vplt1Z7vd/CzzeQd/IIplIL8TE63v2pjJWHhGsuHwQ+uHjU0P6M6lxRz1iUzv0b9TN+vG4LOmllyKzjpCXXvrCpAcrNR1BJlRDiZuCvQH9gqJTyp1AEpWgcvt4s3ccvOHH6sQCYqqzEZ3ard04gz7CYCukw7m/kfvoPut4921U+l9ZqjDGxnFwwnZjYWISApDgDUO26vu4br7NR/LUxHZn49hZeuzqef/1g5drz2nO8+01cOn6qh61BIDTVXNSdWx97jQ8eHcec0Yn8fmV5wEuPvmgpawZF4ISLhn38/T6+ym7DkN/ercXjNcebhllMhV6r223apbuWDAPVMF/6pdfryf30n6RePcNDvwBOLpiOQW+sp1+Vhac4vVumx/2dPVCPX2jg2W+sGPQ6xp2XSmLPIcyYcLmHpUEgNLSkF2gylF9sIi3RwLyx3Zi6siLgpceG7qdsGfwTbKXqF+C3wBshiEXRAHGxMT5L8r7eLL31KQSKv9L+c9PGY1r9GvbKUg/PGff2TXdDvYZwNop3MJgYd4aOJ9ZUcF3/eKpt1ez7xtHD1Fhbg6aYi/qKK1RN6i1lzaBoFJpr2PJvdrOuMIPBYyZpFUKz429J0ZuGzZw0usHqtj8a0rCkpGRX35XFVOhVv6A2gXNn6/tPMe/OwR7HFn6+geEdqxiYKbiwm549eRJsFtcswMZaGjS0pBcooW5QV7YMgRFUUiWl3ANEhVmh1j1Q1197FUMnPt6oa/wJWXHeAY7Ovt3jM+fOGX+l/bo7hdw9Z2x4uiI3hLNK9fhNSVQVHGH8QCMf7rZQabHx49EiRvaLcXlUtYStQd24QtWkXtcNXusBzgoHWmvYB2t3sbGiM+eOntDsz9KyB6op9/enX0lJyU3WMPdEaeak0cTExnroFzg0LCm9t8e1lmoz9opij2POKtWzF9jQC8GNpxu452AVJftMXH5GqsujqiUtDfzZLDT1fsqWwT+qpypAtOqBCoZghLIxVa66Yy2cBCLWzmpQvLUMvdFOJXD3uTG8u62KS3rFsmRzITFJnzFxUEyz2Rp4W5ZryGahKc8OtuqlSu+tj/dW72SLpSeDrgzdwOKG0KoHqqn40w5/nweqYcV52Zg+/We940La6z3DUl1Nj4wkj2POnXpdUwSmaslZHfSM7GVg80kb7/5USkriFm4fFNNslgbetMGfzUJjCbbqFU365TepEkKsBjp4+WimlPLTQB8khJgCTAF44+GxTBkzLOAgWyOB7BBsDhp6WwVcn7k7p+v1eq9l+OemjacoPxcRl0zH219yHReArTSnwW3NTpy9T/O/zsdmtZBohKQYgV4nWPmrEYtdElNewNhBjm3VzWFr4G1ZLhQ9WU5CUfVSpfemEwoN89Cvp6Yx5bK+QcW071gea361MfzOlkmomoNAdwmGmlBpmPM+NinpeOsLruMCMMTE1quAAZzcv40hHTx3Za7fup/t+8p560cbCUZJUowjuSoxC+LioKLcxK1nO3wEm8PSwJs2hKIny0koql7RpF9+kyop5WWheJCU8k3gTQA2zIp6h0OtbBP8va26uw47J7k73ZWzjx2mqMaMDxyiJe027BXFWItzXPeSdisxsYElh87ep7LiQj54dByLxzpc0wvKLVw//yglxSWM6acnzloGxIXc1sDXslwoerKcBFv1UqX34AiFhnno197PJYWHm3yvaouVmUu2cNkDL/s/OYzRyjahqRrmNB51alhRfq5jSLOknn7ZDAavG32Kjx/goks8jZb/8+I08otN3PLwqyy/JZn0JAP5JiuXzc8hr7iC0X31YKkEjCG3NPClDaHoyXISbNUr2vRLLf8pfGKtNiNxzPhzzsoScckkXPEHOnTtxYkjB4hN78aRWbd77MAx52fRoWsvthXkey3Be1sWrJt4tI3TcWWXChYV2Fh1yM7y3Vkkpla67BUaclJ//+8PIhBMnPmq30pQqJvRvRFo1au8rASDrX7fnnJEbl08uuBrBo9/GL1ByW9zYrNZXSNobFYrNqsVY1pndPEprjmlvvSrc48+5FJ/CbGiMJufvkxny5ueFbq6iUebOB0ju1pYWGDnq0OSD/fkkJFa6bJXaMhJfdIzCxAIFjwxyW/y0RLaEGzVK9r0K1hLhRuAWUAG8LkQYruU8oqQRKbwpJlqe+5vb4CrXG7X6Tm5cAZCb8BmKiImOY3qskKMaZ09xkw0hF3aA+rhKCsuZO2K+fyUEMeyHVUAVJWbsJur6ZSi44uJbXh1q55jfSb4TXg2/3cZ5b9uo02czm+C5N4kn3/iKGMHtee2FaFvIg+06lVSkMt5vdI8jo
W64VThSUtr2NfbD1HSbhCnd+zaXI+IOnxpGPoYshc5JjrYTEWOQ0mpCENgVXRv+pW79h2KDm+od+5XP+5h+/58Fm6vxKDXUVpehbmqms4pOr6cmMzcrdC23zC/ycTCzzdw6PBR2sYJv8mHuzZYrHYu7FjFn3yMsgmGYKpe0ahfwe7++wT4JESxhDVa9UA5sZTm+D+pCdhsNmLTuznK4OAql2dc/aDr+5MLpnPWtHnsmD2VThOe8xi2Ggo2/3cZvVP1dB95m8uTasGfbial2syHtyTRNl40OLvPSVlxIbvWrqBdvOTx4QYe93O+e5O82VJJnLWMa/uIsLI8CHXDqcKTltawN1Yf4KJp2iz7adUD1dz40rB2V04joYOjj+rkgukAdJr0SpP1y1ZdhayurGe7AHDFb/pjKc1l9KgLuf2aC7h+xsvozVY+uiWRNvGCq3tafM7uc5JfbOLjNT/SLl4yc7iR5/2c764N2QWltDFYGdFJhpU2RKN+qfpzgGg9OiazTRiPL5E2l2iBYyuyOT3T65DUunjradr832V0iSnl4p5G0hMdy30Nze5z4n7d2R31fs8/sO17tmRXMH99PmnxgsLKYyS2TSdFI8f04mP7SK/z9xzKhlOFtqzecojUASM0W/aL9NExzUaA+mWtKCE5oxOm49s8jtftGSqvqibDWMXwHu765Xt2n5OFn29wXXdOR4Pf853a8MHPOeQVlZIWr6Ow0vsoG62IRv1SSVWEsOTz76j48nL0xhiP4431mKn7tups1tTHJQC1b3QWUyGAx2d10ev1mPOzEDp9jetwzTPSe/Po7CUBbWn21tN0YNv3ZB0tZ3uWjVd+qHSdK3R6Opp891LtWruClOoKJg4KrLp1zz8+YO2SefQ99QnThqfXjLG5odkSKn+O6gWHd3L53Wd5HAtlw6lCO6SUvLlmH5c+cJ/WoWhCKD2yAtUwc2kesSkZAOjjEurZvjRWv8yFJ4iPq181qtsztHDtFnIKq9iUZefFH6rcnqfj7HLfvVQfr/kRvdnM7YMCq245teGlRavgxBZmXNSmSaNsAqUptgjRqF8qqYoQpE5P+oXjSB14icfxxnrMuAuY0xKh3egZHufo9XoqvnIsUbj3FOjjEjgy+w6k3Uq2rnZ8hJD2JomjL6uBpoyDaUp1K9QGn4HE2JCjemVpUcifqQgPfj1VSGLXM6O2OT2UHllOnXFP1Nw1zGmfsOX5sZw1bZ7r+K75DwWlXxXHdtNj2BUU/bDcdcxbz9DKg3bWzH26UT1D7lWqxlS3WrJnKZpsEYIhOn/DIxAhBJbSPL/nNeaN0GQqw5iU5upBcOLsOUhKSmbrczcj3bYWS7sNQ2IqMYkpnDm51gzVV69GQz0coTTYbEp1K9QGnw0RiKN65zbq17G1smj9Hk6/ODxNNsOJxupXr8mzPKwToFa/dEIXUv2qKDjFnv3ryEiJcx0LVc/Q+q372ZzVuOpWKJ/vj2izRQgGpeIRgtFoRFaU+D3P3xuhU7SK87KxSYkQeo7Mcg75FCBtCJ2erj1qS+Du93NuQ3bvQfCFvze/UBpsNsVXKpTP94c/64b8U8fISKjfAhtNTsStmVPlOs7J7KR1GGFPY/Wr+JXJ2CvLajSsVr/M6ZleR9UEo18735vJa3df4HEsVD1DTV0ma6mepWBsEaJNw1RSFUEIuwW7xYzO6H/HYd3RMRZTITMnjaY4L5tzH/2QHbOnklEzmR1wTWc/uWA6SXGGFmlqDaXBZjg/P5BlxqK8HC7pmVbvWlVyj3xMFWaKpfe+RIVv3DXMqV9F+bnEpXfBkJzeoH45Z/sFM1TeHYvZjKW6qt5xrXuGWuL5wS4xRpuG1beMVYQtccltqS4rDOhcW1UFnSa94vrqMO5v9Jo8y6MUHkmUFRfy1sy7MZVEXt9RQ8uMTrK3reb8/p7LsO4l95Vfb6agpLxF41aEBovVRkKKGp7dWNw1zKlfHcb9zeuc0eYm5/gRhvfLCOoe+cUmbvzz6xH3e9zQEqM/olHDVKVKY4ZOnUN+WX0X7fTkWA8bh/TkWLJ3rKJow3+Ib1v7y+3LY6baVERlbq0fi7Q7XNGl3b/NQTjiq8nb3466cCCQZUZbWS6pKQM9Po82J+LWykff76PTwGu1DqNZCLQHqrEeWbvmP0R1WaFLw5z6ZbNaQxR546goLSTOqPd/YgP4qtiE+/JYMEuM0ahhKqnSmPwyM2f+7sV6x+sajToTrHve2sDgO5/1f2MpXaVxqC2PSzdrdqHTYck/5vjcbsVuMGAxFZKU3tvnbe1WKxXZhzGX5rHl+dphsAadN0u80NBQk/eapfOoPvgta5bMY8y9jzVbDMHgb5kx/+RRBnT0XB6KRifi1kpZlZU2mZ21DqNZCHRXX2PbCWxVFeiTUust71Vk185cbEn9yvl5HWPuHNjgOQ3hq9E7v9jEyN+/iLG6lLkfreMvd4VmuTKUNHWJMVo1TCVVEUaKzkx5aTGJKW29fu58I5TShrRWu457k4y49NrBoM55V+b0zAbfMJ2eMAmZ3f3unqlLU6tKvpq8y4oL2bt6Ka9dFsMDq5cwcvzUsK1WNcTxfT9zx0DPJuZodCJWKJx6YzEVIuKSXRqmtX5Vm4rQ63VNrir5qtjMXbGe0uIi5lwdx2OrfuC+Gy9pNQlHtGqYSqoijLEX9uXt77/g3Ktu9fq5U1DuGz0UY0z9hnYhHQJiLcvn6OzbXcd1Qoe5XXq9cnzdN8yZk0aTcMUfsNlstTO2cIjVc9PGN/hG6s+nyRsNNXmvWTqP0b2sXNwzjtG9qsK6WtUQhXs2MHjkMI9j0ehErFA49WPmpNGYqqxeNEw6Ei5pb7J+9Zo8i+xjh+vp18xJo336VXVq4+iHbErTta+KzejhZ/PBF9/z2/4GhnXVM7KrJWyrVU0hWjVMJVURxjmndaTky28A70mVEyHtXrcNS7tjGHLbjA4exxtj3umcteWOMSnNa2+Fk0B8mrzhq8n724/eYe/qpTx/k5F4o2DyuUauXxF51SqL2Uy6sQqdznMDgda7ihShI7NNHLuOHSQlNV3rUCIKfVxCPQ2rLs1HJ4LTL6ivYcakNHpNnuW1YlVWXECMsDbZq8lXxWb6y0sQ1iruOjuOBKNg0iADE1e2nmpVtGqYSqoiDL1ex2lpevJPHSO9gUn3bTM6eO112PL82JA5GzcGfz5NvvDV5F1R/TE39LLSr51jVl6/dgZG96qMuGrV5k/n88crBmgdhqIZuWZIb9Z9vpWeZ52vdSgRhfvynJMtz4/l3D8vq3e8OfUrO+tXRp7RvslN194qNlabndzCXMYP0NM3XY9eBz3bilZXrYpGVFKlMenJsfWa0p3HffHH3w7m3sXvc8mdvpMHX7ttAhlyHGr8+TQ11Gvlq8n7yRvP4/P9Vr7LMrmOlVRJKg59FDFJld1ux55/mDN7Xap1KIpmRAih2a615qaxu/qCva8W+lWw9wcyByfygo+mayllg31W3io2Ly1axawl/+WzfVa+PuKwGbBJKKuGpFNbVVIVwaikSmPcbRMCJSUxnviKU
1SWlxGf6F28fJXCgzXDS0pK5tjSxzEmeRpVOoaZVnu9xt84mEB6reomXlJKKmw6KjxsTwRSJ71eH44c/GkdNwxWLtutnZTEOGTxMa3DaBaayyS4OfXr8Pz7XQ3rTnwNjQegqpR1m3Mb9Gry12dVt8F9/db9VNl0VGOn3C3f1ut1dGtf3wRYETmopCpCmTD8ND756RvOGnFNiz7X2+gaJ75K8A35NA25amxAvVZ1E6+nP94amh9IQ05sX8+oO8/ROgxFMyOEICNBYCopiqh+v9aIeyO8Nw3zRnVpPuuPeW+6bnd8D5Yqk98+q7oN7tHabxQNqKQqQrnknN68+dIq5EVXI0TzeUR5o7El/4Z8mtYumee316qpTe7hzIlDexiYZiMx3v/IIUXkc8P5vVi8aTWDRt2sdSgKGqdhHdrE8v5070nQS4tWwYktDfZZqWHE0YVKqiIUIQRjf9OFdd9+wcCLAq9WhaIHIlQl/0Bm4kHTm9zDmb1fvceCe1TjcrTwf/278Nqqr0ElVUERqh6uQDWswlQGPvq4AjW3jEZX8WgmqKRKCPFP4FoczTSHgDullMWhCEzhibdxNlJKSss+5Lnh/qtVgY6TaEn89VpB4IlXJJG1bwdDOscQF2vUOpSop6U0TAhB7zQ9xQW5tG2XGerbRwTBaJBW+nXklx+5ZWg3r58FYm4Zra7i0Uywlar/AY9KKa1CiBeAR4FHgg9LURdf42w2vzqVnes/5axLrm/w+kDHSbQkgczECyTxijT2ffUeix8YoXUYCgctpmH3XzOIRz7/gAtvndEctw97gtEgrfSrylRKbIb3fyYDMbeMVlfxaCaopEpKucrt243ATcGFo2gs8fHxFP+ylorBl5KQnKJ1OI3C30w8CCzx0oKmjtw5vGsLI05LxmAIbjirIjS0pIZ1aJeCNfc7LNVmr9MOFOFHVdbPnHPFEK+fBdJsHq6u4uE+xDmSCWVP1V1AfVc2RbMihOCF287nkY/nctEdf9Y6nJATSOKlBU0ZuWO1VPPrV2/zzIwrmzk6RRNpdg37w+gBvPnVEoZcO6k5H6MIEUaDIaiNQOG6y68p43YUgaHzd4IQYrUQ4hcvX2PczpkJWIFFDdxnihDiJyHET29++n1oolcA0CmjDacnl3Nsb+TbDEQCzj6vF2/ozL5vPsFUUhTQdZs+fZuHrh2oqlQtTCg0zEO/ln/Z5FgGn96NqiNbsFq8e7opwouK0sB+tyMJ992IK7/eTEFJuf+LFAHjN6mSUl4mpRzg5etTACHEHcBoYIKU0qfzopTyTSnlYCnl4Cljhvk6TdFEHvntEPZ/+S7mygqtQ2n1eO5GdPR3+SMn6yCdqg4zpL/3pldF8xEKDfPQr1uCqzTOGD2QjR+/GdQ9FC1Dp5TW9wLkuRux1sBUERqC3f13JY6mzhFSSvWveTPib5yNwaDn0RsG8conr3OBl0ZYf1uRfe2uKSvMIzktw+t1Wu0a1JKm7Ea0VJv5ecUrLJmhyuzhhhYadnafznT/4Wtyjx8ms0uvlnhkWBCMHYJW+tWyDoDNj9qN2PwE21M1G4gF/lez7rxRSnlv0FEp6hHIOJtBvTsyaOcJjuz4gR5n/Z/HZ/4ExNfuGq0GMIcrTdmNuGHZLP429ly17BeeaKJhf7xxKHe/MYeR972A3hAddoHBvIQp/QoNajdi8xPs7r/TQhWIIjRMv34wE19eQrtu/Uhuq2ZIhZrG7kbc/d1Krulho1/39i0VoqIRaKVhKYnxTLm4B5+sWsw5V9+uRQgKPxzdtZn+XSPTC88X4bobsTURHa9IUcbcKSO4Y97fuPLBF1t8hE1rpzG7EU/9uhfbvrWMm3JpM0akiFRGnduLdTu/Jefoftp376t1OIo6lGRncUH/jlqHEVLCdTdia8Jvo7oi8khOjOPha/rxzYLnaGDvgKIZMVdV8vPHs3nld5eoxFbhk2duG8bWFbOwmM3+T1YoFGGPSqpaKef378KoHoL9G1f5P1kRUsyVFXz75kzmTB6GTqd+xRS+0et1PHLdGWz+7B2tQ1EoFCFALf+1Yu4YdRYvf/IDezbY6X/BVQ2e62t3jUEnQjLANJr48aO5/PW3Z9KhXWQ53Cu04Tf9u7Hz163s3fwN/YZcpHU4EYnSL0W4oJKqVs4fbhjCnM9+Yvc3Fs646Dqf50WjPUKokVLy44o53Hqmkf49OmgdjiKCuOvKs/nDW/8mt2OXqLJZCBVKvxThglqbiAJ+f+25nG7eyS/rPtE6lFaLlJIflr7C+NPhqiFqU6yiceh0Op6ecAE7Pp5FVYVyuFYoIhWVVEUJ91x1NmfLPez8nxrP6KSsuJC3Zt4d8JgZX0gp+X7Ri0w6O47LzukZougU0UabpHieH3cOG5a+onUoigghv9jEjX9+XY2aCSNUUhVF3Hn5IIbEHmH7lz5HNEYV7kORm4qUkm8XvsA9Q5MYMVCNoFEER89O7Rg/MJ6fV3+odSiKCMB9MLIiPFBJVZQx8dIBXJRyim2fv6d1KJrS1KHI7hTlZbPm1encPyyNC87o2gxRKqKRa8/vS3L2Txz5ZZPWoSjCGDUYOTxRSVUUMvai/ozKKGTt64+1+v4NX0t8TRmK7E5hzgn2LH+OBfdfzNDTO4cyZEWUI4Tg73cM58TahVSUlWodTtRiiEvk11xt//s3tLynBiOHJyqpilJuuKAvr004i+/efIwT+7drHU6z4W2Jz1mlGn9u7VDkxlSrflmzguwvXub1qZcQHxvTLHErohshBC9PHsGP7/5FJVYa0f+CK1hTZxxVS+Nrec9Zpbr9XMcQ5NvPTVTVqjBBJVVRTEZqMh9MH4l9y1J++mJRq3Nfr7vEl511mLdm3s13nyzwORS5ISxmM+vn/5XBxkO8/LtLiI0xtsSPoYhSUlMSePqWc/hxxexW97sZCQgh0HIWQt3lvf1Zua6qVUODkRXaopKqKMdo0PPUbRcyrlsRa954gpKCPK1D8iCYHXp1l/g+m/cUhpyd/Lz+M5btqGL4nOOur2U7qjiwzbcgHdu7nW/mPsQLvz2NiZcOCOZHUigC5rQuGUwblsbmj+ZqHUpUYg8ymQ1md17d5b1HZn/oqlqt37qfxTvNDJ6T6/pavNPM+q37g4pXETzK/FOBEIJLzu7JgO7pPLH0ORL6j+T0C6/ROizAc/nu0vFTA77OWaV6cqxjie+WQUm8P2cTc289jcfWmJn4/McktfE/gb6irJRNK+ZwTmoFix+6Qo2dUbQ4F5zRle92b+LQ5rX0HqKGc7ckJ03BXe++fDdjwuUBX+esUi2/xeH8Pn5QAnPnHOKDCZ14bM1mPvzndNq1SQwuOEWzoP6FULjISE1m3tSRDBG7WfXSA+RkHdQ0nmB26DmrVM4lvnhrGeMHGNj8a2lAS31SSrauWs7W9/7C36/twh9vOl8lVArNePimoZh3fEZBzkmtQ4kqEpPbNPnaYHbn1V3eE9ZKbh1gYMOvlWqZL8xR/5j3yJAAAA3pSURBVEoo6jFuxBksmXEZ5u/eYs0bT5J34ogmcQSzQ+/Atu9dS3zDZmUx
fM4xVu63seFQqd/G9CN7tvPlK3/ghsyTvDf9crp3SAvVj6RQNJm/3T6c3ctfoDg/R+tQogaziCGvqKxJ1wazO899ee/cWTkMmZPDyv021h8sV03pYY7QpAFywyzVdRkhFJVW8NyKTRQk9uasKyaQ0EKDSMuKC/ng0XEsHtuGdolGCsot3LqshInPLwto2c6dtUvm0ffUJ0wbnu46NvvbfPZ3vMFjSfHAzz9yZP1SRp6exh2XDSQuVjWih4z2A6D3JVr2/YaOvZ9LCg9r8uiKqmomzPqGKx98UVVOW4DdG1dza7v9DOnfvVHX5RebuOXhV1l+SzLpSQbyTVZuWV7WpGW7lxatghNbmHFRbdXspW9KoPN5jVpSVASBzgjn3xuQfgXVUyWEeAYYA9iBXGCSlFLVp1sRqSkJ/OOui9mflcu/3p9JdVofTrtgNO27Nu84lrrLd+479BrTWwWOqtW23CqW1dkenZT9PZeOn8rezV9z4od/c1n/NP7+4CUYDPqQ/RyK8CbSNCwhLoanbhzAi0tf5cJb/6B1OK2euKRUdh8rbnRS1dDuvMYmQuu37udkrpnFO3M9jnfK2a+SqjAkqEqVECJFSlla8+cHgDOklPf6vVBVqiKWwyfy+ffGQ2w8Wk734TfR88xzMcbEhvw5bzx8G6bcrHrHkzK7cc8/Pgj6/hWmMn7531JMx3Zz9cAMbrvsLIRoHYWUsCRMK1VN0jANK1VO5q7cyoGEQZxx0XWaxhEN7Fv4CP+666JGXXPdQ7M5mZtf73inzHT+8+K0UIWmaClaqlLlFKMaEgGVLLVihk6dQ36ZGXA0cn/97feUV5qJS0jk/n+8S8fup4UsMQlF4lQXq9VC1r6dHFy3lG5J8NDlZ3D6zZeF/DmKyCFSNey+0ecy483VFPQ5h3Yd1YikQHhu2nhMpvr9UUlJyTw6e4nP606WWhr9LJU4RS9BWyoIIZ4FbgdKgEuCjkgRtuSXmTnzdy/WO77j9em027WIH74oxRyfwWkXjqFzr37o9Novo5UVF5J7dC9HNn5FijBxUd90nvr9MGKMyk1E4SBSNez5Oy9m4kt/5+IHXsVgVM7+/jCZyug1eVa944fn39/gdfEp7ZorJEUrxO+/LEKI1UAHLx/NlFJ+KqWcCcwUQjwKTAOe9HGfKcAUgDceHsuUMcOaHrUirNDr9UwbMwSA0vJKln79EZvXllBBDPGZPel9/lUkJKWQkJzS7LGUFRdycs9mik8coirvKF2SJef0SOfpKeeqRCpKCYWGeejXU9OYclnf5gw5IGKMBp6ZcD5Pvv1XLv7dM2HxEtMa0ae0Z+/RHE7v3l7rUBQRgN9/ZaSUga6PLAY+x0dSJaV8E3gTUD1VrZiUxHimXH0uU4Bqi5VTBaV8uvFtsvLLqNQlUlItSGrfg6T0jnQd8H8A6A0GYuMTArq/zWrFXFkBgKmkkGPb1gMS08mDJMUIMmItXNKvI8Ou6UxyQlf0erVDKtoJhYZ56FcY9FQ56dslnduGFPH5/5ZzzpXjtQ6nVZLRbygb9/5HJVWKgAh2918fKeWBmm+vA/YGH5KitRBjNNC9QxoPXF/r82SqMGOqNLP10B52rP4OgLySSkwkoA/gTbvKVEKf9gkIIUjQ63j8wj4Y9HpSk4eoWXyKRtMaNOya3/Th56XfcWz/ALr2Hah1OK2O7qcPZPuPi7UOQxEhBLse8rwQoh+O7chHAf87/xRRTVJCLEkJsVzdLoWrh/bROhyFolVo2MM3/R9T575Fm/QnSElL93+BImCEEOSaqrUOQxEhBLv778ZQBaIIf9KTY9n11kNejysUkUhr0TCDQc9T48/nD4v/yYjJT2OMVb+TdUlKSvbalJ4UgKGxMTmjOUJStEKUo7pCoWh+wtSnqkmEUU9VXbbuO8ZrW+wMu+U+rUNpVez67guuTj7A5eedpnUoCi1ohE+V6uJVKBSKVsK5/bpyUVohezev0zqUVkXbTr05nte0GYCK6EIlVQqFQtGKmHT5IKw7VpKTdVDrUFoNHXv0YdORUv8nKqIelVQpFApFK0Kn0/HsxGHs/Pccl/2IIjh0Oh0msw1N2mUUEYVKqhQKhaKVkZQQy78mnMd3i+pPQFA0DWNqZ6otVq3DUIQ5KqlSKBSKVkjX9qncdk4yP69apnUorQJ9fDJZucVah6EIc1RSpVAoFK2U0b/pQ1LuFo7t3ap1KBFP7/OvYs2OY1qHoQhzVFKlUCgUrZjn7riI/f99lwqT2r0WDEKnw25XPVWKhlFJlUKhULRihBDMvWc4G955QjVaB0GbdhlsPVqidRiKMEclVQqFQtHKSUtJ5L5Rfflh6SsqsWoiBoORuIQkrcNQhDkqqVIoFIooYMTAbtzSD7atWq51KBGLSkgV/lBJlUKhUEQJVw7pQ+eyHRza+rXWoUQkxdYYSkyVWoehCGNUUqVQKBRRghCCx8ddQP6PH1NWXKB1OBFHUnpHzMqrStEAKqlSKBSKKOOVu0ew/f1nKCnI0zoUhaJVoZIqhUKhiDKSEmJ5feoIdix5hrLiQq3DUShaDSqpUigUiigkIS6Gv40bwublr2K32bQOR6FoFaikSqFQKKKUbh3SmHlVd75++ylsVtUrpFAEi0qqFAqFIooZ0LM9f7mmJ1/P/ytWq0XrcMIaU/avJCfEah2GIowJSVIlhPijEEIKIdJDcT+FQqFoSaJdw/p3z+Tp60/jm7eewGqp1jqcsCU1Xk98bIzWYSjCmKCTKiFEV2AUkBV8OAqFQtGyKA1z0KdLBn+/qT/r3/wLFrNZ63DCDrvNRlVFhdZhKMKcUFSqXgYeBpTVrEKhiESUhtXQs2M7/jVuIF+/9TjV5iqtwwkrivKyGdQ1UeswFGFOUEmVEOI64ISU8ucQxaNQKBQthtKw+nRtn8rLE85h47w/cHzfNq3DCRsObfwvIwd10zoMRZhj8HeCEGI10MHLRzOBx4DLA3mQEGIKMAXgjccnM+WmyxoRpkKhiGhiUzR7dCg0zEO/nv0TU645L6QxhhudEjNZ+MRp/Pmt/2DsfRptUttpHZLmxNkr6dt3CBj0WoeiaGn0flMlF6KpAyKFEAOBNYBzkbkLcBIYKqXMbtJNQ4AQYoqU8k2tnh9ucUD4xBIucUD4xBIucUB4xdIShKOGhdPfQbjEouKoT7jEEi5xQPjE0uSkqt6NhDgCDJZS5ofkhk2P4ycp5WAtYwinOCB8YgmXOCB8YgmXOCC8YtGCcNCwcPo7CJdYVBz1CZdYwiUOCJ9YlE+VQqFQKBQKRQgIfKHQD1LKHqG6l0KhULQ0SsMUCkWwtMZKleZrqjWESxwQPrGESxwQPrGESxwQXrFEK+H0dxAusag46hMusYRLHBAmsYSsp0qhUCgUCoUimmmNlSqFQqFQKBSKFqfVJVVCiGeEEDuEENuFEKuEEJ00jOWfQoi9NfF8IoRoq1EcNwshdgkh7EIITXZHCCGuFELsE0IcFEL8WYsYauJ4RwiRK4T4RasYauLoKoRYJ4TYU/N386BGccQJITYJIX6uieMpLeJQ1BIuGhYu+lU
Ti6YapvSrXhxhoV81sYSVhrW65T8hRIqUsrTmzw8AZ0gp79UolsuBtVJKqxDiBQAp5SMaxNEfsANvAH+UUv7Uws/XA/txzFc7DmwGxkspd7dkHDWxXASYgIVSygEt/Xy3ODoCHaWUW4UQycAW4PqW/m8ihBBAopTSJIQwAt8BD0opN7ZkHIpawkXDwkW/amLRTMOUfnmNIyz0qyaWsNKwVlepcopRDYloOM9LSrlKSmmt+XYjDnNBLeLYI6Xcp8WzaxgKHJRSHpZSVgNLgTFaBCKl/AYo1OLZdeI4JaXcWvPnMmAP0FmDOKSU0lTzrbHmq3W9aUUY4aJh4aJfNbFoqWFKv+rHERb6VfP8sNKwVpdUAQghnhVCHAMmAE9oHU8NdwH/1ToIjegMHHP7/jga/QKGI0KIHsA5wI8aPV8vhNgO5AL/k1JqEoeiljDUMKVftSj9ckNr/aqJIWw0LCKTKiHEaiHEL16+xgBIKWdKKbsCi4BpWsZSc85MwFoTj2ZxaIjwckxVQwAhRBLwETC9ToWixZBS2qSUZ+OoRAwVQmi2rBAthIuGhYt+BRqLRij98kE46BeEl4aFzPyzJZFSBjqNeTHwOfCkVrEIIe4ARgMjZTM2sDXiv4kWHAe6un3vnLEW1dSs/38ELJJSfqx1PFLKYiHEeuBKQNNG2NZOuGhYuOhXILFoiNIvL4SbfkF4aFhEVqoaQgjRx+3b64C9GsZyJfAIcJ2UssLf+a2YzUAfIURPIUQMMA74j8YxaUpNc+XbwB4p5UsaxpHh3NUlhIgHLkPD3xlF+GiY0i8XSr/qEC76VRNLWGlYa9z99xHQD8dOkaPAvVLKExrFchCIBQpqDm3UaBfPDcAsIAMoBrZLKa9o4RiuBl4B9MA7UspnW/L5bnEsAS4G0oEc4Ekp5dsaxHEh8C2wE8f/qwCPSSm/aOE4zgLew/H3ogOWSymfbskYFJ6Ei4aFi37VxKKphin9qhdHWOhXTSxhpWGtLqlSKBQKhUKh0IJWt/ynUCgUCoVCoQUqqVIoFAqFQqEIASqpUigUCoVCoQgBKqlSKBQKhUKhCAEqqVIoFAqFQqEIASqpUigUCoVCoQgBKqlSKBQKhUKhCAEqqVIoFAqFQqEIAf8P6h5ETysCFxYAAAAASUVORK5CYII=\n", "text/plain": [ "
    " ] @@ -404,19 +382,9 @@ "execution_count": 11, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n", - " FutureWarning)\n", - "/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n", - " \"avoid this warning.\", FutureWarning)\n" - ] - }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAl8AAAHiCAYAAADWA6krAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzs3Xd8VFX+//HXZ2ZSSUJJ6L2JIMUCdimrIiCKZS3YdW2s6Fp2Xdvuuq67uu5vdVUQv6irogh2RVARV4oKCkgvSi+hp2fSZ+b8/piZMEkmjdxMST7PxyMPk7l37j0B583nnnvOuWKMQSmllFJKhYYt3A1QSimllGpOtPhSSimllAohLb6UUkoppUJIiy+llFJKqRDS4ksppZRSKoS0+FJKKaWUCiEtvhQicq2IfHWM790oIiMtblLEE5EvROTGcLdDKWUdERkpIunhbodq+rT4ijIisktEzrPymMaYmcaY0XU49xsi8mSl955gjFlUn/OJSA8RMSLi9H3tEpGH6tnssDLGjDXGvBnudijV1PnyociXFQd9OZQU7nY1lC8DCwJyMCfE59dCM4y0+FLh1MoYkwT8GviTiJxv9QlExGH1MZVSIXeRLytOBE4CHg5ze6wyxBiT5PtqVd83a75FLy2+mhARuU1EtolIlojMEZFOAdtGi8gvIpIrIi+JyGIRudW37SYR+c73vYjIcyJy2LfvOhEZKCK3A9cCD/qu0j7z7V/eEycidhF5RES2i0i+iPwkIl1ra7cxZiWwEW+w+tvbSUQ+FJEjIrJTRO4J2JYgIm+KSLaIbBaRBwOv4Hxt+qOIrAMKRMRRy/FOFZGVIpInIodE5Fnf6/Ei8raIZIpIjoisEJH2vm2LAv78bCLymIjs9v25zRCRlr5t/l6+G0Vkj4hkiMij9f7LVUphjDkIzKdiVlwoIqt9n9+9IvJ4wLYaP3++LHnDlyWbgGGB5xOR/r7Peo54h1hcHLDtDV+WfuHLxO9FpIOI/Md3vJ9F5KRj+T1ryXIjIneJyFZgq++140VkgW//X0TkyoD9x4nIJl8m7xOR34tIC+ALoJMc7XnrVKUhqvEYY/Qrir6AXcB5QV7/FZABnAzEAS8CS3zb0oA84DLAAfwOKANu9W2/CfjO9/0FwE9AK0CA/kBH37Y3gCeraw/wB2A90M/33iFAapC29gAM4PD9fDpQCFzq+9nma8OfgVigF7ADuMC3/WlgMdAa6AKsA9IrtWkN0BVIqMPxlgHX+75PAk73fX8H8BmQCNiBU4AU37ZFAX9+twDbfMdNAj4C3qr0u77ia8sQoAToH+7/l/RLv6Lhq1LGdPFlzPMB20cCg3yf88HAIeAS37YaP3++LPkWaOPLiw3+LAFifJ/rR3y58SsgH+jn2/4G3sw9BYgHvgF2Ajf48uJJYGENv5cB+gR5vdosD3jfAl+bE4AWwF7gZrz5frLv/Sf49j8AnOP7vjVwcsCfW3pd/g70y/ov7flqOq4F/muMWWWMKcHbLX+GiPQAxgEbjTEfGWNcwAvAwWqOUwYkA8cDYozZbIw5UMc23Ao8Zoz5xXitNcZk1rB/hogU4S1+XgI+8b0+DGhrjHnCGFNqjNmBNzyv9m2/EviHMSbbGJPu+30qe8EYs9cYU1SH45UBfUQkzRjjNMb8EPB6Kt6AdBtjfjLG5AU517XAs8aYHcYYJ94/+6ul4i2Bvxpjiowxa4G1eP8RUErVzSciko+3yDgM/MW/wRizyBiz3hjjMcasA2YBIyq9v7rP35XA340xWcaYvVTMktPxXkw97cuNb4C5wMSAfT725UIx8DFQbIyZYYxxA+/ivUVak1W+XrUcEfGfu6Ys93vK1+YiYDywyxjzujHGZYxZBXyIdzgHeHNsgIik+DJzVS1tUiGgxVfT0QnY7f/BVwRkAp192/YGbDNA0IGWvoCZAkwFDonIdBFJqWMbugLb69HmNLzh9nu8V2Exvte74+0O94dSDt6rz/a+7RV+n0rfB3uttuP9BjgO+Nl3a3G87/W38N7imC0i+0XkGRGJoaoKf/a+7x0Bx4eKxW6h7/dWStXNJcaYZLw5cTze7ABARE4TkYW+IQW5wJ2B232q+/xVzpLAz3EnYK8xxlNpe+eAnw8FfF8U5OfaPucnG2Na+b78QyFqynK/yvl2WqV8uxbo4Nt+Od4L8N3iHW5yRi1tUiGgxVfTsR/vhxAA3z39VGAf3m7nLgHbJPDnyowxLxhjTgFOwFuU/MG/qZY27AV616fRvh6lfwPFwG8DjrMzIJRaGWOSjTHjfNsr/D54i74qh67UrmqPZ4zZaoyZCLQD/gl8ICItjDFlxpi/GmMGAGfivcK8Ici5KvzZA90AFxWDWCnVQMaYxXhv9/2/gJffAeYAXY0xLYGX8Q57qIsDVMyPbgHf7we6ioit0vZ99Wx2fdWU5X6V821xpXxLMsZMAjDGrDDGTMCbb58A7wU5hgoxLb6iU4xvMLj/y4E3gG4WkRNFJA74B/CjMWYXMA8YJCKX+Pa9i6NXRRWIyDDflWQMUIC3KHL7Nh/CO66pOq8CfxORvuI1WERS6/g7PY13MH88sBzIE++g+QTxDuQfKCL+wbDvAQ+LSGsR6QxMruXYNR5PRK4Tkba+K1z/dG+3iIwSkUEiYsc7Zq4s4M8i0CzgPhHpKd4p8P8A3vXd4lVKWes/wPki4h90nwxkGWOKReRU4Jp6HCswS7oAdwds+xFvBj4oIjHiXc/wImB2g3+DmtWU5cHMBY4Tket97Yzx5Xh/EYkV7zqOLY0xZXhz
lK1ZYLwzHydCGw0xmwGEJHXgAlAYHBNAP7o+/xNYKqIiInljSVDFCvLTJQV72Pzss9wFP5Iel0J5w3rxnmTjiKn/XEhvW7gatEA4qphfH/49ZNv8M7fY7dx3eM5uOfL/3ismj5nEaN71JNrd1NU5qCgc07SbUIbYZphQYQ7w9Z9PY9LR1h3uTFQPOVXZnoaF5x8NBecfDTT3pxP93G3UVO4Djze2S53fQ3GVc/FqYsbfubYqwbSIbudVSUf5DfT3mR8f8BZA6Q2aplI1gwLx+CrB7Aj4OtCoOkqbQ3PMca4RKQc6Ay0/haNOGHFLvTgnd3avmEN2xd/gKtiH/07p3LTyN6MvHBkWI/jn7qfuXofHo9hf2kFnTJTKKndEtON9ynGzY4X7wr6eKyat2Qd32+o5L01KZTUVJPfsYaUFEna6foI0AwLIpwZ5nG7Kfz6XS771fnhKC1k8ZxfhS/dfdDjqampjD5uoAUVHV5RmYNla7awOcPwn7V7G/ILkrtlIhyDr2CLJDU9G2zJc7xPFLkFuAXg2nv+wqiLJ4ZWnUVyO+fRo+/Bvwx1EVhmwlFeyuZvP2f/hpW0d5Vz6oCO/OqKo+iQPSTsx/IL3Hnev2L0lFOzuf+/O3nqrflRvW27Nbrld2TI/xy8kfaafx18KTIWFJU52FtSyS2n5vG7szvy2BflSbsidASFLcMSJb8gvBm2Z9uPjDq6iyV3OAYTLL/uHpXLIwtKOWfKP5j35F0xOQCLt/wCb6N9fjth3s/78PLy5F3Rvqlw/CYUAr0Cvu4J7GruOSJiB3KBkmAvZox5zhgz0hgzMp6DK5KMMezdsYVFrz/B/KfuZff7D3NFl528dttI/j1lLDeNGxG1KWf/itHXjWjfsGL0Wx9/TXF5VVSOn+iefmsBUlMCbicA141oz+zPl+qfb3iFLcM0v4Lb8MlMfnbOMKvLOEhgfgFc0B8qSov1zu0w8a9on+tf0V7zq0E4Bl9LgYEicoSIpAFXA+83ec77wPW+z68APkvkXolIqK2uYtfmH/l8+iN8/cz/kvXdizw8Lp9Xbj+dv984itHD+lpSl9UrRicyXV4iajTDIqiqspzO9lraZVi/x2tTgb1fTpcHu7uWm49PY+ZHi3SAEAa6vETzQr7s6Ot/mAzMxXub9gvGmDUi8iCwzBjzPvBv4BUR2Yj3bPHqUI8b67Kystn8/JSgj7eEMYZqRwXbVi5k/9rFdLLXMKRHLlOuGESnnMHhLrfNmq4YnZMhVNR6qPx+QVT2S2utvOz0oFP0edkt3+Q2GorKHJw75Z+c3t1Fp8xUXV4igjTDggs1w/xWzZ3J78bH3qwXNO79qqiqBVc92emC1NfHZPtEvOQX6I4chyOxfPL2ry82x25xEeB2uSjdv5v1X/4Xd9FmBnRK5dSju3PakJ5kpsfeWaNf4IrReVl21uwoZcL0Iq6/ZKzl6+bEqz//ezbvfvgZlxyTw+/O60KRw8WVyXBr9qlT4mejzcNItvxqzhfT7mH6HWe2agPpaAvMMFddNVv3lvPzj1P49Nn7E/v3LYKSMsMKhkG/0S36Hz02uh+TmDGG9UsXsPCFB1nx/K+wffMsfzozm+lTzuT315zO2SP6xfTAC5qfup8+5ysuumeaTt+3QlGZg/H3TOXNjxfp5UYV9/bt3MbQbhkxPfCC4O0TpxbUc+YvHtX8aiXNsJbRvR0tUF68nx8XfYAp3gJVxYw/oS9nXTOYnPaZVpfWJsGm7nMyBGe9h6Jd25J6LZfWmj5nEZs2b+P8/kKnzDS93Kji2saF7/Gnc462uozDCtY+sbvCTdesUs2vVtIMaxkdfEXJptVL2f3tp7gr99Ovcyp3nz6Qo/ueYHVZYeG/bTtw6h7g8he385vT7Tw8fwnXjz8tcaeaw6SozMHbny6mc6bhqiFpHNm7QFezV3HNVbSF3l1jYzu1Q2m64Tb48ytN86sVNMNaTi87RkhlWTErPn6DRS89yDdP/ZJjSj7hscv7MeOXZ/HgtWdwdN8Cq0sUycqsAAAVLElEQVQMu8DLj9OXlTNhUCrHdbNzRkEt50z5h07fH4K/wb5DSg2XHZVK3442isocOlWv4taurRs4vk/rmvOt1Fx+je5ez1Nvzufy3zyjGXYImmGtozNfYbRj4zr2b1hB+eZv6ZNr42cn9eGEC463uqyo8U/dT/9uDxWVDl65NIOSGg/nH2F4baV37RxtwA/Ov57XtiphZgXMWFXP/upKXc1exa2tX8/hz+dZs4lzWzSXXxcOSOHWD74mO9XoJchD0AxrHR18haCqspy9O7awffEc0mr2c0r/jlw6qIDjLmrdRtWJwn/50b9i9GmDc3G6PGwu3MvNx6fx8keL+MUVsbf8hNX863lNvSCT38+v540betO5vU1Xs1dxzV20lZ5dYnPLm2CC5ReA0+VhbK/tpKVnMvvzpXoJMgjNsNbTwVcr7du5jd0/LKPoh8X0zIJTj8zn/klHkd1+uNWlxYzmGvDTcfPUm/P5bkMhz933Uw0wdD0vlZh2bFjDKQM7Wl1GmwTmF+DLMBdHdalj3IC0mN5+KNqKyhzc+vAMhg/syTl93HTKtGuGtZAOvg7DGMOebZvY8OW7pFTsZliPdlw9qBunnzfa6tJiVrAG/LwsO0UOF2c/r9P3gfxT9V1zcxh8RBceyHdpc6qKe7t/WM6Vx3a3uow2Cdz3Mdgahs9+pS0UftPnLKJk93ZmbtrOwlu7k5dl1wxrIW24D6K22sG6rz5k/rO/Zdnzv6HTmhn889LevHzHWdxz+cmcPrSP1SXGhcAGVoDcjBTG9nJyep9U3v50cdKuAVZU5uDy3zzD+u37dPsglZCqtq1kaL/4HHwF0jUMg/Ov5fXe/CX8eWw70k1dw1pummEtozNfeGe3SvbtZv3C96F0K9k2F+cf14uz/mck6WmpVpcXtw41fZ+f6mJPkq4BNn3OIkr37ODeqW/oVL1KOMYYumVZXUV46BqGwfnX8vrJMZl0zUzhzL4pjfILNMMOJ2kHXy6Xk62rvmHXys+x11dwZGc7fxhzNL27nh7zqzHHi+am78G7hs7vR3nX0Bl/xnDue+rthO8DKypzcOOfX6astIRHzu/ItTM38fTkPhTkpOpUvUoYaxfN5YLhvawuIyxasoZhMuXXrQ/P4KFfXNawltcFR7g5onsXHji/k+ZXKyXVZceqynJWfvYOC1/4E98+/7+cULuYqROP4sXbR3Pf1afRp6CTDrwi5FBr6Nw79Q1K9+xI+Gnq6XMWUbRrG3mptSzaWsOkoXZw1gA6Va8SR+nmVQzvn1jrGGp+NZ6xz0+t1bW8QpTwM19b1y5n7/qV1O5cS88cuHXUIPqeNpTs9hlWl5ZUmltD54L+Kcx8axMzrunOr+YuZv6KDbz0+xsS5uwp8Gzxvc+W8PtRdh7+so55693srXDz4nd7G9bBAZ2qV/HNGINUF9Ol4xCrSwmrluTX/Z8uTbhZsMD8mv35Up6Y0JlLXtxE346pzPzepWt5hSDhBl/Vjkq2fr+E4h8WY6vay5jBXbn+xG4c2Ts5
196KFc2tobOnuIJJQ2tZtKWG/NTahOsDCzxbHN2jnuO62ZkwyEBaO+4e01nXwVEJpa6mml45VlcRfi3Jr/ED7Nw79Q3K9+1KmAwLzK/xA1LIS6tn0lA7HXKyuHtMZwDNsDZKiMFXRUkR65d+SuXG5XTPMlxxQm8GXzmQjjnHWl2aaiKwgdXjMewvrSC/XQr5WQ5qne6E6ANr7mzxtvHtKKmBk3oKP32nlOmrnNhtKXq2qBLGxmWfMeGYnlaXETHN5VfPDlU8clFXnvpyE+/d2JMpH8b3LFhRmYMb/vwSleVlTPPl199u7kVldTnnDbA3yi/QGfu2iNvB17YfV7NjyYdIxW56dsrg1hP7MOJCXXsr1gU24fvPIu8elctjC4qhvrpRH4X/DPK6C0/l1odnxHSI+Qdcz93302bPFr8vSefcYd6zxclFeraoEk/RplWMOCF+thRqrebyC+CxBcVMGmqnc1o94wekxH2G+e9mzEuzM2mondlrHdw9pgsD0fwKh7gZfDnKS9ny7RfsXbOIgvbCsT3bc+elR5LXIbF6C5JJS/soqurqGxpaY/WX3T/geurN+cxfvJK/BtzNWFleqmeLKim0N1V0yG5ndRlRETgL5nJ7GjKssrqWicd25Klp8Zthc7/6tuFuxoqqWs2vCIjpwdee7ZvZsPBdKNtJQY6dy47rxZhfjrK6LBUmLemjGNMrhbc+/prXJ+Vx2+zYmspvennx6cvyuGrm11w+tF2juxkH9tKzRZX4yov30yfbWF1G1ASbBWucYfa4zbCeWYazfHczkprBkb27aH6FWUwPvnJWvsT/XTyYLh0HWV2KiqBD9VGc0CuDsb2c9O+cGhNT+Ye6vNivUypjeznB7WTB1loKy/RuRpU8Sndv46j8BOy2b4FEy7CPN7mZWYHezRhBYkwMn6ksejKGi1ORENhHUeRwceXLO3hyXDqdc7Mw9kzOmLbNN5Vfx5knHcv8hV8z/pzREQ2xwLB6efZXzJ73OWNOP4UFS1byxLhMLnmxkA9u7kWOrY7icgdTPqzjjRt607m9Te8Eaq1TpyTMQnv/+mJzUuXX1689zhOX9SKrXbrVpVgq1jIsML8657bnsVc/1gyLlIJh0G90izIspEVWRaSTiMwTkQ2+/wbdxl5E3CLyne/j/VCOqRLbghXrmbm6jpHT9nHi1J0c2xVEoLK6FnHVNEzlj+sHMz9axNOX5Xmny99e0GihQ//+icXlVS36/FA/E9gL4Z+anzV3EeP6SUMz/ey1Dm+NQsNWQSOn7WPm6joWrFhv5R+pOgTNsPCpKdlNRlpMX0yJinBkWKiZFSy//K+rGRYbQv1N+Q3wqTHmYRH5je/re4M8r8YYMzzEY6kkENhHcfE9U1m4t4iFH+Cbyt/bMJX/0HnZvLqsjrz2dsb0olFPxfXjT2sUOMBhP7/7mnOD/sy0N+azYMnKRv1cndvZSDd1XNC/PZXVgc2oWdhtdsDO0P55jd6LilmaYWGS196O3W6zugzLhSPDqmrr25xZzeWX/3XHD0jRDIsBoQ6+JgBjfJ+/DCwgeHAp1WrN3dbtdHnYXLiXa45J5eWlZYjH3ainwn/H4dOX5XHTu4uxpQjPXZbHLe8twWMMzzf53N8E6z8jDPyZS15ZxM+Oz2rUzzV9WTnXHJOK3V3LEb26MtCWos2o8UszLAyKdu+gZxI127dUWzJsXD944aNFvPvT/Gbzq2lmBX4vWH4N6pLOuH7VvPDRIhbeWqAZFgNCHXx1NcbsBjDG7BaRLs08L0NElgEu4GFjzLvNvaCI3ALcAvDsr6/ilgmnhViiSgSBDa0VVbXgqicnQ+jczoHT7eHJcekUlTm4bkR7zn7eO0M1qEs6+alFHNPVxqAunRjdo5TVe90M6tK50efjB9Q2NJwG/kzndjkNZ4fF5Q5+dlwaUz6sIs2WQlG1i38trwd7ITm+raq0GTUuhTXDAvPr2nv+wqiLJ0ai5phTXryP47okZ7N9S7U0wy7oT8OMWH5qbdD8appZgd8Lll9Aw+uKCAs2VrGrwqkZZqHDDr5E5BMg2C6pD7TiOL2NMbtEpB/wmYisNsZsCvZEY8xzwHOANtyrBk2n8nftKwJgQ1kNEwYc6Knom5vVMENV5HBR4qjjgpPTqa13ccERbj7fVMeecmfD58VVbiYe246npnlXcA78mReXlDacHVZU2Rp6Id7bCJ2ys8lJh+5ddGo+1kUzwwLzK5ka7ncu+5gHrx9sdRkxrSUZVlFVi93j5ppjUpn2ZUnQ/GqaWU6Xp+F76/fVBc0vl9tDGk7O7JvCCU8W0ik7E9IzNMMsFNLdjiLyIzDGd8bYDVhgjDnkuhAi8hIw2xjz5mEPoIMvdRiBIQY0nFEe1SWNMQPaQ301k4al4nDZyLK7mbnKSaUrhVtH2Ji5yglp7Zh0TCpPfVVCh5xs74v4fuaGd2soqvZQUWvAnnbg7FDDKnKifLdjJDMsmQZfn0z9X16/c4zVZcSlwAwLnBFzueG6Y9MOyq+mmTXpmFRwVjNzlZOFhXBGTzS/rNKKux1DHXw9AhQHNKt2Msb8uslzOgLVxpg6EckDvgYmGGPWHvYAOvhSrRQYZDuLKknBA3ibXf1rbdW6PGTYvTf6ekgh1QZutweP7+Zf/8/YbCl06egdkGlgRUn0B18Ry7BkGnytmX4///yZtoiEqiX51TSz/J8D1Dg9ZKZ6c0zzywKtGHyF2vP1MPAfEbkJ2A78BEBERgI/N8bcDBwNPCsiHrxLWzzcooGXUm2gAaNaSTMsRFUVZdiNy+oyEoLmV/IIafBljCkGxgZ5fBlws+/zRcAxoRxHKaUiQTMsdHu2b+asIV2tLkOpuBLSIqtKKaWSm2P/DrIy06wuQ6m4ooMvpZRSbVa1fTWjju1ndRlKxRUdfCmllGqzGkel1SUoFXd08KWUUqrNumdZXYFS8UcHX0oppdpMUqK6Osj/b+9+Q+u66ziOfz5Nm/5LnStbt/4JWmeZlrl1Y4xtfeQskhWxtjDZHgxBoU8cKCg42ZP5WPGJDlxF2QPHxkDDiq12HUzKNt1apOsf02oMsmbpWu2sTc3ars3XB7nFUJL03HNuzu+ck/cLAjk3J/w+v97e7/3m3t/9HaARaL4AALn8/fB+bVjDZYWAdtF8AQByuTh2XutW0nwB7aL5AgDkMjoyqBuXLUkdA6gdmi8AQC4ePaXbP8EGq0C7aL4AALl89NGl1BGAWqL5AgDksuTyf1JHAGqJ5gsAkEvPkoWpIwC1RPMFAGjb2TOntWg+TyFAHjxyAABte/fwm/rqxttSxwBqieYLAJBL1zyeQoA8eOQAANp2bvhvWv4x9vgC8qD5AgC0bUl8qFuWs7s9kAfNFwAAQIlovgAAbRm/ckUXRv+dOgZQW4WaL9uP2D5qe9z2vTOc12f7uO1B208WGRMAOoUals+lixf0qZtZ7wXkVfSVryOStknaN90JtrskPSPpYUnrJT1me33BcQGgE6hhOdmpEwD1Nb/IL0fEgCR55kfhfZIGI2Kode6LkrZI+kuRsQGgKGpYPh+8f0L
LF3enjgHUVhlrvlZLOjHpeLh1GwDUATXsGu8PvKVtD346dQygtq7bfNl+1faRKb62ZBxjqj8pY4bxtts+YPvAjpffyDgEAEytzBo2uX7t2/lC/tA14Cn/WQBkcd23HSNiU8ExhiX1TjpeI2lkhvF2SNohSXrzJ9M2aQCQRZk1bHL9+vm+ocbWr/Hx8dQRgFor423H/ZLW2V5ru1vSo5J2ljAuAHQCNewaY+8d04obe1LHAGqr6FYTW20PS3pA0i7be1q3r7K9W5Ii4rKkJyTtkTQg6aWIOFosNgAURw3L54alizR/flfqGEBtFf20Y7+k/iluH5G0edLxbkm7i4wFAJ1GDQOQAjvcAwAyu3Txgi5/eD51DKDWaL4AAJn999xZfa73htQxgFqj+QIAACgRzRcAAECJaL4AAJmdOPSG7lx7U+oYQK3RfAEAMhs7857uue3W1DGAWqP5AgAAKBHNFwAAQIlovgAAmY2dOanuBYX25wbmPJovAEBmK3oWaAGXFgIKofkCAAAoEc0XAABAiWi+AACZjI+P68LFC6ljALVH8wUAyOTUu0O6p7cndQyg9mi+AACZhELLFnenjgHUHs0XAABAiWi+AAAASkTzBQDIZPSfJ7V4IRusAkXRfAEAMjl19HV95cHPpI4B1F6h5sv2I7aP2h63fe8M5/3D9mHbB20fKDImAHQKNaw98+bNk+3UMYDaK/r68RFJ2yQ9m+Hcz0fEvwqOBwCdRA0DULpCzVdEDEjiLyEAtUQNA5BCWSsnQ9IrtkPSsxGxI9NvLV0xq6EAIKO2a9hNy5q3H9bKlauoy8B0FmbfgPi6zZftVyXdOsWPnoqIlzOOszEiRmyvkLTX9rGI2DfNeNslbW8d/ioiHs84RmXZ3p654aw45lI9TZmHNDtzKbOGNbF+Sf+/X7be/XTqKIXwWKmmuTgXR0QnBvuDpO9GxHUXotp+WtL5iPhRhnMPRMS0i2DroinzkJhLFTVlHlK6ucxGDeN+qZ6mzENiLlWVdS6zvtWE7aW2l139XtIXNbHIFQAqjxoGoNOKbjWx1fawpAck7bK9p3X7Ktu7W6fdIul12+9IelvSroj4fZFxAaATqGEAUij6acd+Sf1T3D4iaXPr+yFJd+UcohHvAas585CYSxU1ZR5SyXOZ5RrG/VI9TZmHxFyqKtNcOrLmCwAAANlweSEAAIASVbr5sv1D28dsH7Ldb/vjqTPllfUyJlVmu8/2cduDtp9MnScv27+0fdp2rRdN2+61/Zrtgdb/rW+lzpSX7UW237b9TmsuP0idqROaUsOoX9VB/aqePPWr0s2XpL2S7oiIOyX9VdL3E+cp4uplTKbc36zqbHdJekbSw5LWS3rM9vq0qXJ7TlJf6hAdcFnSdyLis5Lul/TNGt8nFyU9FBF3Sdogqc/2/YkzdUJTahj1qzqeE/WratquX5VuviLilYi43Dr8k6Q1KfMUEREDEXE8dY4C7pM0GBFDEXFJ0ouStiTOlEtrc8wPUucoKiJORsSfW9+PShqQtDptqnxiwvnW4YLWV+0XpDalhlG/qoP6VT156lelm69rfF3S71KHmMNWSzox6XhYNX2gNJHtT0q6W9JbaZPkZ7vL9kFJpyXtjYjazmUa1LB0qF8VNhfrV1nXdpxWlkt/2H5KEy9RPl9mtnZ16DImVTXVlYdr/8pEE9jukfRrSd+OiHOp8+QVEVckbWiti+q3fUdEVH5dS1NqGPULKczV+pW8+YqITTP93PbXJH1J0hei4vtiXG8uNTcsqXfS8RpJI4myoMX2Ak0Urucj4jep83RCRJxtXe6nTzXYSb4pNYz6hbLN5fpV6bcdbfdJ+p6kL0fEWOo8c9x+Setsr7XdLelRSTsTZ5rTbFvSLyQNRMSPU+cpwvbNVz8JaHuxpE2SjqVNVRw1rDKoXxUz1+tXpZsvST+VtEzSXtsHbf8sdaC8pruMSV20Fg0/IWmPJhZGvhQRR9Omysf2C5L+KOl228O2v5E6U04bJT0u6aHW4+Og7c2pQ+W0UtJrtg9p4olyb0T8NnGmTmhEDaN+VQf1q5Larl/scA8AAFCiqr/yBQAA0Cg0XwAAACWi+QIAACgRzRcAAECJaL4AAABKRPMFAABQIpovAACAEtF8AQAAlOh/j2LI5yLQytoAAAAASUVORK5CYII=\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAl8AAAHiCAYAAADWA6krAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzs3Xd4VGX6//H3PTNpkAIkhBp6VcCKWAELCiyIXbHr2ljRdXXXvq66ftXd/a0VxEV3VSxYwAoqNpqCAqI0UXoJPT2TPjPP74+ZCZNkEhJyMiW5X9fFtcnMyTlPcOfDfZ5zn+eIMQallFJKKRUatnAPQCmllFKqJdHiSymllFIqhLT4UkoppZQKIS2+lFJKKaVCSIsvpZRSSqkQ0uJLKaWUUiqEtPhSiMgVIvLFYf7sOhEZafGQIp6IfCYi14R7HEop64jISBHJDPc4VPOnxVeUEZFtInKWlfs0xrxpjDm7Hsd+VUQeq/azRxpjFjTkeCLSQ0SMiDh9f7aJyL0NHHZYGWPGGGNeC/c4lGrufPlQ4suKvb4cSgz3uBrLl4FFATmYF+Lja6EZRlp8qXBqY4xJBC4C/ioio6w+gIg4rN6nUirkxvuy4mjgGOC+MI/HKkcZYxJ9f9o09Ic136KXFl/NiIjcKCKbRCRHRD4Wkc4B750tIr+JSL6IvCAiC0XkBt9714rIt76vRUSeFpH9vm1Xi8ggEbkJuAK423eW9olv+8qZOBGxi8j9IrJZRApF5EcRyTjUuI0xK4B1eIPVP97OIjJbRA6IyFYRuT3gvQQReU1EckVkvYjcHXgG5xvTPSKyGigSEcch9neCiKwQkQIR2SciT/lejxeRN0QkW0TyRGS5iHTwvbcg4O/PJiIPish239/bDBFJ8b3nn+W7RkR2iEiWiDzQ4P+4SimMMXuBeVTNit+JyE++z+9OEXk44L06P3++LHnVlyW/AEMDjyciA32f9TzxtlicG/Deq74s/cyXid+JSEcReca3v19F5JjD+T0PkeVGRG4VkY3ARt9rA0TkS9/2v4nIJQHbjxWRX3yZvEtE/iwirYHPgM5ycOatc42BqKZjjNE/UfQH2AacFeT1M4As4FggDngeWOR7Lw0oAC4AHMAfgQrgBt/71wLf+r4+B/gRaAMIMBDo5HvvVeCx2sYD/AVYA/T3/exRQGqQsfYADODwfX8iUAyc7/ve5hvDQ0As0AvYApzje/9JYCHQFugKrAYyq43pZyADSKjH/pYCV/m+TgRO9H19M/AJ0AqwA8cByb73FgT8/V0PbPLtNxF4H3i92u/6km8sRwFlwMBw/39J/+ifaPhTLWO6+jLm2YD3RwKDfZ/zIcA+4Dzfe3V+/nxZshho58uLtf4sAWJ8n+v7fblxBlAI9Pe9/yrezD0OiAe+AbYCV/vy4jFgfh2/lwH6BHm91iwP+LkvfWNOAFoDO4Hr8Ob7sb6fP9K3/R7gNN/XbYFjA/7eMuvz30D/WP9HZ76ajyuA/xljVhpjyvBOy58kIj2AscA6Y8z7xhgX8Bywt5b9VABJwABAjDHrjTF76jmGG4AHjTG/Ga9VxpjsOrbPEpESvMXPC8CHvteHAu2NMY8aY8qNMVvwhudlvvcvAR43xuQaYzJ9v091zxljdhpjSuqxvwqgj4ikGWOcxpjvA15PxRuQbmPMj8aYgiDHugJ4yhizxRjjxPt3f5lUvSTwiDGmxBizCliF9x8BpVT9fCgihXiLjP3A3/xvGGMWGGPWGGM8xpjVwExgRLWfr+3zdwnwf8aYHGPMTqpmyYl4T6ae9OXGN8AcYGLANh/4cqEU+AAoNcbMMMa4gXfwXiKty0rfrFqeiPiPXVeW+z3hG3MJMA7YZox5xRjjMsasBGbjbecAb44dISLJvsxceYgxqRDQ4qv56Axs93/jKwKygS6+93YGvGeAoI2WvoCZAkwF9onIdBFJrucYMoDNDRhzGt5w+zPes7AY3+vd8U6H+0MpD+/ZZwff+1V+n2pfB3vtUPv7PdAP+NV3aXGc7/XX8V7ieFtEdovIP0Ukhpqq/N37vnYE7B+qFrvFvt9bKVU/5xljkvDmxAC82QGAiAwTkfm+loJ84JbA931q+/xVz5LAz3FnYKcxxlPt/S4B3+8L+LokyPeH+pwfa4xp4/vjb4WoK8v9qufbsGr5dgXQ0ff+hXhPwLeLt93kpEOMSYWAFl/Nx268H0IAfNf0U4FdeKeduwa8J4HfV2eMec4YcxxwJN6i5C/+tw4xhp1A74YM2jej9G+gFPhDwH62BoRSG2NMkjFmrO/9Kr8P3qKvxq6rjavW/RljNhpjJgLpwD+AWSLS2hhTYYx5xBhzBHAy3jPMq4Mcq8rfPdANcFE1iJVSjWSMWYj3ct//C3j5LeBjIMMYkwK8iLftoT72UDU/ugV8vRvIEBFbtfd3NXDYDVVXlvtVz7eF1fIt0RgzCcAYs9wYMwFvvn0IvBtkHyrEtPiKTjG+ZnD/HwfeALpORI4WkTjgceAHY8w2YC4wWETO8217KwfPiqoQkaG+M8kYoAhvUeT2vb0Pb19TbV4G/i4ifcVriIik1vN3ehJvM388sAwoEG/TfIJ4G/kHiYi/GfZd4D4RaSsiXYDJh9h3nfsTkStFpL3vDNd/u7dbRE4XkcEiYsfbM1cR8HcRaCbwJxHpKd5b4B8H3vFd4lVKWesZYJSI+Jvuk4AcY0ypiJwAXN6AfQVmSVfgtoD3fsCbgXeLSIx41zMcD7zd6N+gbnVleTBzgH4icpVvnDG+HB8oIrHiXccxxRhTgTfHAvM8VXw3B6nQ0uIrOn2Kd0rb/+dhY8zXwF/xXuvfg3cG6jIAY0wWcDHwT7zT10cAK/A2nlaXjLcfKhfv1Hc2B88y/4u3dyBPRD4M8rNP4Q2zL/B+yP+LtyG0Pub6jnmjr19iPN47mrbibR59GfCHxKN4L5tuBb4CZtXyuwDe2bVD7G80sE5EnMCzwGW+Ho6Ovn0XAOvxNvm/EeQQ/8N7iXKRb/+lVA1xpZRFjDEHgBl48w68M+aP+nrCHuLgzE59PII357biza3XA45TDpwLjMGbGS8AVxtjfm3s71CXurK8lu0LgbN92+zGe4n1H3ib9QGuAraJSAHeS7JX+n7uV7wnjlt8ma53O4aQeNt/VEvim0bPBK4wxswP93gaS0Qm4S2YqjfZKqWUUhFHZ75aCBE5R0Ta+Kax78fbE/H9IX4sIolIJxE5Rbzra/UH7sJ7p5FSSikV8RpdfIlIhu9Ok/XiXYTuj0G2ERF5TryLxq0WkWMbe1zVYCfhvRMxC+8luPN8tylHo1jgP3jX3PkG+AjvJQGlGkwzTCkVao2+7CginfAuwrlSRJLwLmZ5njHml4BtxuLtgRkLDMO7QN6wRh1YKaUsoBmmlAq1Rs98GWP2+Bdt8zX+rafqeiQAE4AZvoU3vwfa+AJPKaXCSjNMKRVqlvZ8iXcF3mPw3qIbqAtVF4XLpGa4KaVUWGmGKaVCwbInovvWN5oN3BHkESzBFrwLer1TvA9wvgngyr
seO274uRODbaaUaoZuHN6rvotjWs6KDNP8Us3FqtcfZsp1Qw+9oTqo4xDoNaJeGWZJ8eVbkHM28KYx5v0gm2RSdRXhrnjXI6nBGDMdmA7w0qItug6GUqrJWZVhml9Kqfqw4m5HwbuY5npjzFO1bPYxcLXvjqETgXxT/4c1K6VUk9EMU0qFmhUzX6fgXUF3jYj87HvtfnzPyDLGvIh3RfaxwCa8DzW9zoLjKqWUFTTDlFIh1ejiyxjzLYd4iKnxrmdxa2OPpZRSVtMMU0qFmmUN96EiGFJiPMTbwXu1ILIYYyh1Q36FDVN3niulWhjNL6UURGHxlRLjoU3reDzigAgML4wh3rigqJS8Cnu4R6OUiiCaX0opiMJnO8bbidzgAhDBIw7iNbeUUtVofimlIAqLLxGJ3ODyE4nISwpKqfDS/FJKQRQWX5Fgxbff8Pvxp3Ld2JN45+Xnwz0cpZRqEM0wpcJLi68GcrvdTP2/+3nshTeZ/tFCFnz2Ids3/xbuYSmlVL1ohikVflHXcN8Qf7z6fPILqj8lBFKSk3l2xgeHtc/f1vxEp2496JTRHYARYyawdP48uvfu36ixKqVUoKbIL9AMUyoSNOviK7+ggL43Tanx+sbpkw97n9n799K+48Hn6aZ16MRvq3867P0ppVQwTZFfoBmmVCTQy44N5F1rsSptTlVKRQvNMKXCT4uvBkrr0IkDe3dVfp+1bw/t0juEcURKKVV/mmFKhZ8WXw3Uf9DR7N6+lb2ZO6ioKGfhZx9x4shzwj0spZSqF80wpcKvWfd8NQW7w8Ef7n+cB26ZiMft5uzzL6NHH21UVUpFB80wpcKvWRdfKcnJQZtTU5KTG7XfE4afyQnDz2zUPpRSqi5NlV+gGaZUuDXr4qsxt2MrpVQ4aX4p1Xxpz5dSSimlVAhp8aWUUkopFUJafCmllFJKhZAWX0oppZRSIaTFl1JKKaVUCFlSfInI/0Rkv4isreX9kSKSLyI/+/48ZMVxw+Wpv/6JS0cM4ubzR4Z7KEqpRtL8UkqFmlUzX68Cow+xzWJjzNG+P49adNywGDXhEh6b9la4h6GUssaraH4ppULIkuLLGLMIyLFiX00hPzeb/7v9SgryrBni4ONPIimlrSX7UkqFl+aXUirUQtnzdZKIrBKRz0TkyBAel28+fBPP7lV8/cEboTysUqr50PxSSlkmVMXXSqC7MeYo4Hngw9o2FJGbRGSFiKxY9PHMRh84Pzebn76cxTMXdOWnL2dZdvaolGoxNL+UUpYKSfFljCkwxjh9X38KxIhIWi3bTjfGHG+MOX74uRMbfexvPnyT8X2gb4cExvdBzx6VUg2i+aWUslpIii8R6Sgi4vv6BN9xs5v6uP6zxsuPSwHg8uNS9OxRKdUgml9KKatZtdTETGAp0F9EMkXk9yJyi4jc4tvkImCtiKwCngMuM8YYK45dF/9ZY2piDOD9XyvOHp+4exJ/unIcmds2c+WZx/L5+3rnkFLRSvNL80upUHNYsRNjTJ3z68aYKcAUK47VEGuWLWbxnlJmrs6s8nqbA4s5/7rbD3u/9/1zWmOHppSKEJpfSqlQs6T4ilQPTXsv3ENQSqnDovmlVPOljxdSSimllAohLb6UUkoppUIo6oovYww0fa9r4xhDCPpxlVJRRvNLKQVRWHyVusFmXJEbYMZgMy5K3eEeiFIq0mh+KaUgChvu8ytsUFRKvB18S+9EFGMMpW7fOJVSKoDml1IKorD4Mgh5FXaoCPdIlFKqYTS/lFIQhZcdlVJKKaWimRZfSimllFIhpMWXUkoppVQIafGllFJKKRVCWnwppZRSSoWQFl9KKaWUUiGkxZdSSimlVAhp8aWUUkopFUJafCmllFJKhZAWX0qpsDuQuZVvX3s83MNQSqmQiLrHCymlmoeigjx++/5LCn79jqO6JfHMxUPCPSSllAoJLb6UUiG1bf0qti2eTRuTz+/PGMBxY86KyIdMK6VUU9HiSynV5HL272bL8q8p3baSYb3a8LcbjyM2RuNHKdUyWZJ+IvI/YByw3xgzKMj7AjwLjAWKgWuNMSutOLZqOk9MnojTWVj5fX52Fh7jQYyHNu07Vr6emJjEfVNmhmOIKoIZY/j1+y/ZtfIbBqYJfxjWk6MmnBXuYdWg+dV8BWaYP7+AKhmm+aXCwapTz1eBKcCMWt4fA/T1/RkGTPP9r4oAtQWUcbvpdMUTANjtduLcbuLSurH71Ttodc6fcLvdAGx//W7+MO4EAGxiIyU1DdBQa6my9+5i3dfvEluwk9FHdeTiP54W7iEdyqtofkWtuk4SjdjoeNljAKS6KmjVsTcAO/87mbizbgc0v1R4WFJ8GWMWiUiPOjaZAMwwxhjgexFpIyKdjDF7rDi+arjAwMrN2h8QUC5adewFwLbnryYurRsAZVk7qvy821eIAYgjlu6Tvf9uFe/djN0RA8DOtx/kgWvHARpkzV1FeRm/fjuXA78uY1C6jYfP6k/vLv3CPax60fyKPrXlV/WTRKAyp4r3bqn8eWNM0Pwqy9qB3W7H7XZXyS/QDFPWClXTRRdgZ8D3mb7XaoSXiNwE3ARw5V2PMfzciSEZYEtQPbBiEtsBYJCgAXV4Du7LIDhLXb7jbdZCrJkxxpC1ZyfrPn2VRE8+V5zWm5PPOBmHwx7uoVlN8ysC1Ce/qp8kHg7/iWVMYjtKiMVdWuw7pmaYsk6oiq9gtzKZYBsaY6YD0wFeWrQl6Daq/moLLHtSezpf82/AO8PVJIyh87XPAHpG2ZyUlZawbsGHZK3/nmE9k3n2skGktUkM97CakuZXmIQ1vwB3aXFlhhXv3YLd4f0nU2f1VWOFqvjKBDICvu8K7A7RsVuU6v0P/in5wOl4gB0v3xrysekZZfTyeDzs3b6RzfPfoVVFHjeO6s+Q0WcS0/xmuYLR/Aqh2i4ptg/o2QpHfsHBS5jG49ZZfdUooSq+PgYmi8jbeBtV87Vfomk4nYX0uuH5yu9XT5lEXFq3w5uON+7KvokKZw42sVU2su59+8HKs1AOY42mwDPKsqwddOnRF4AtL9/W8HGqJlOYl8Ovi+dQtGU5w3qnMvWKwSS1jg/3sEJN8yuEAjPMn19weC0R9vhWlRlWXpCF2LwPdfFUlFe+fjj5hdg1v1SjWLXUxExgJJAmIpnA34AYAGPMi8CneG/T3oT3Vu3rrDiuOsh/tpibtZ/VUyZVvl5ecODQPxxQZJXnH0Ds3tkMmwiJ8d7/iySm9a5yRhd4dppX6Gb7FO/Uv/F4iE1Oq9zvoex68z7cJQVk+5r0K5w5PHDtOD2DDLOta1ewY8mHtMHJHWMGMfC8c5rtQqiaX+FXfbbLn2ENza/qJ4mJ8R2hMsP6VmZKbfnl/9mYxHbY41tVzs7X5cDcZ8n2ZZ0/v0BnwVTdrLrbsc6uUt9dQuGZJ24h/GeLu7ZtrDxThNr7IUSkcjZMbPaAIqtfvQKjtm2qhKjTXnkMf79XdZ7yYjpd/RQxsXHAwbNIPYMMvQO7d7Dlh3m4dq1l+MB0Hr5xaItYCFXzK/wCZ7sCM6zh+dW7U
fkFgRlWTq4zJ+CqQfAWPk9FKV1vmAroLJiqv+afrM1Y9bPFXds24na5KM3KJD6ta43t7faDxZCnpICyr54DIKNH/QKrPmrMjvmO4R9jXWeUrvIy3C4Xu7ZtJDdrv55BhoAxhjWL5nJg7WIGpdu5+5S+9Ot2driHpVqAYPkF4HZV1PozTZ1fUHuGBTb813ap0p9f/u01w1RttPiKYsF6IyrKy3Dl7Tu4kQi7X72DCmcObdPSK1+2OrCCqb7/YGeUxu2qvJXMAGJzVDbl+383PYO03p5tm9j03cfEFuzg3OO6Mv7205rtZUUVmerV2+XLL6BKhoUivyBIIVY5q28OzrxV+9z4fw/NMFUXLb6iTK29Ec5cwHtPvPG4DgYDhsR4R72n45tSsDNKU1qIu2AfbrxnjTZHLOD9ffy/m/ZRWKO8rJS137xP7uaVnNi9FY+d05duHfuHe1iqBTlUfvlVzy+o/yXFplLbjJinpKDGpcnSrEzKC3NqZJjml/LT4ivKHKo3whEbh9vhqOw7KEtL5/9enROewdbBH0APXDuucqze38d3ubTaGmHaR3F4jDHs2rKB9V/MoH1MGdeP7MvQ8aeHe1iqhapPb5c9ivILamYYeG88sie2rZFhml/KT4uvKFB7b4Tr4EbV7vgp803PJyYmhXawDZSYmFQZSFV6KgLulNQ+iobLy9rHmi/expG/gxN6JnPndceQkpgQ7mGpFirwbuzA/HKVl+GIjYva/ILgGVZemENMuy6A9rKq4LT4igL16Y2ITW7PkMnTAO/sUCSeLQZT/Qwy8PcMpH0Uh+aqKOfXpfPY/8tSBrT18MDpA+ifcUa4h6VUZYYF5lfJ/h2V9w9Ga35B8AxbPWUSna94AtBeVhWcFl8RLNjaXeWFOex68z66XPEEcLDps8KZU/lhjoazxWACzyArgtzivevN+6jQPooqjDHk7N/Duk9fIb48h4mn9OCUm04gLjYm3ENTLVyw/q7A/BKbjYqcXXgcjmaRX3AwwwLzy9/LqvmlAmnxFcGCrd1Vsn8HWXO9zzSzO2IivjeiIQ7VR+EpL6bDpY+RkH7wIbottY+ioryMtYs+Yf+axRyX0Zp/XzSE9LZHh3tYSlUK1t8VmF/xaV0rP8PNIb+g7l5WzS8VSIuvKCM2G25nbuXyEdHUG9EQwfoo3M7cyseDtFT7dmxiw9cziS3J4qZR/TnmrBbzfEXVDATmFxzs79L8Ui2NFl8RqHpzqtvlomT/DsRmIz6tK7FJ7RgyeVrU9UY0RG19FPFpXXGVl2GgRhNrc52+L8zL4dcln+Pc+D3H92zD8xOHaPO8ili1Ndf7F3/25xdEX39XfdWVX6BN+EqLr4hUvTnVX2xU5OyiLGtHZX9EcztbrE31Pgq3y4XYHNgcscSlda1sYm1u0/db1v7IjiUf0BYnt446giHjz9aFUFXEC9Zc7yovo7xafkHzm7EPpq4+sMD8Am3Cb0m0+IoQwZaTcLkqDt6KDXh86980l/6I+qreRxHYAxe4mGFzWIw1a08mW5d/SfnOVZzcN5VHWsjzFVX0qz7j5XJVUFFehlB1/cGWnl9wsA9MF2NtuTTVI0Sw5STsCcnsnnEnDof3zrXm2h9RX/4zyMD1wPzr6XS+4omoXYzVGMO67z5n76oFDE63c8eJvTjiglHhHpZSDVJ9xsuekMzeN+/GuF04HDGaX3WsB+ZflkKb8FsOLb4iWJcgBUVLOmOsLvAMMrBQ9QdXtNmfuY3fFswmtjCTccd24bw/Dg/3kJSyTJcgBYXml1ew9cBUy6LFV5gdei0vVR+73rwPd0kB2QGzhJE4fV9eVsovCz8ia8MKhnaN59HR/ejRcUC4h6XUYaueYeWFOex85Y/YYltphtXTrjfvw1NejHG7yPbNEkZifinraPEVZsHW8irNymTvzPub/XIShyvYYqzukgI6X/1UZX9cJE3fezwe9u3cyi+fz6Ctzck1I/sxbMwIbZ5XzUL1DCvNysR4POx758EqGab5dVCNm4hKCuh09VOV/XGRlF+qaWjxFYFaynIShyvYYqzZjpjKwitSlBY7Wfv1bHI3/chpfdsy7apBtE1uFe5hKdWk/MspaIbVrnoTfrYjhpgIyy/VtLT4CqMnJk+sspbXwWc1mha3nMThCrYMhZcJyxpgbpeLPVvXs2Xhe7SRIm4bNZCB547CbtfFFVXzEmw9Qm+GGey+S2eaYXWLtPxSoaPFVxg5nYXEJLarvNzoV5a1g7Yt7Hbsw1XXMhRAyNYAK8jJYv3CDyjduZrT+qdzz7XH0Co+tkmPqVQ4BVvPC2h2jwxqSpGSX9WVFhdBWWFIj9nSWFJ8icho4FnADrxsjHmy2vvXAv8CdvlemmKMedmKYysVTltXf8+OpZ+Q6ijm3t8dRe+LztFeriikGabUQZtXLWFItzbhHkaz1ujiS0TswFRgFJAJLBeRj40xv1Tb9B1jzOTGHq85CJyutye1Z8fLtwIgItjtDiqcOWT06B3mUdZUmJfD2//6CxPv/n8kprQN93CqCFwDzJaQjDEGAFtMfOX0/ROTJ1oydb8/cytbvp+HZ+8vnDGoE4/cPCxsz1fMynNy85NvMP2+q0hNaR2WMUQ7zbCGqX650SN2drx8a5X8isQG+2jJr5jEdrjdLowxVfIrlJcfC37+nGtvCc3SNy01w6yY+ToB2GSM2QIgIm8DE4DqwaV8AqfrO1/z78rXA+9wicTr+8s/ewfHvjUs+/Rtzpg4KdzDqSJw+j7urNtrXMqNSWxX+QSBw2GMYdX8j8j9dSmD0+08OHwAPTuf06gxW2HG3CXk7t3Ja3O+484rzg73cKKVZlgDVL/c2NWXYZG6ntdv339J3ooP2bw9k/2bNvLxY9cydIi1y7tsOlDGmDufOeyfr76GYX0uP/7y7RxK1swjOdH6m3hGH90lZE/WaKkZZsXfbhdgZ8D3mcCwINtdKCLDgQ3An4wxO4NsoyJUYV4Ovy36gKnnd+HWOR9wwtjLIu7ssSlkbv6VrUvnEFuwkwuG9WDs7ZGzEGpWnpM5C5cz7YI0Js1ZzjXjTmlRZ44W0gxrxkoK87hqRF8emfYT827szKQ5B7jngivq/KyUlVfw/bpt9T7GS/O3HHoji5UW5nP6wHQ6plo3wyginHhkj5AVXi05w6z4Gw7W4GKqff8JMNMYUyYitwCvAWcE3ZnITcBNAFfe9RjDz51owRAjQ7RO14N31uvMbm4e/3QHZ3ZvE5GzX+Cdvt/59oPeR3cUHADxXQ4UIddp6jV1X1ZawtqvZ1GwbRWn9krixrG96Zo+MES/Qf3NmLuEMb3AXl7AmF4xLe7M0UKWZVhzzi+oeoe2P7/A+4+2p6QgIvNrwCljePThyxmZAffP3c/p3RIP+Vn5bNlGZu3rTIeMXvU6Rr9zx1ky1sDLj8bjrpJf/ufX+tsnjhhxHst+XQ0VlhwagN1bfqO4bBOjh4Vm4eeWnGFWFF+ZQEbA912B3YEbGGOyA759CfhHbTszxkwHpgO8tGhL9QCMatE2Xe/nn/Xq0bWcwqJiHO5W/LYoMme/
7psys+qjO649eCmgroULjTHs2LCWDV/PpGN8OTeP7MsxE04P5dAbxH/G+PwocFeUM7a3g9u+bFlnjhayLMOac35B1Tu0u1ZrmSj76rmIzK+92zfxzQ/r2J4aQ7dkwF3BnIW1f1Y6nfc39h7I5fyLLiY7f0e9jrHfCG3a30pC68YVnoGXH52lrir5Bd6/Z+dXzwEQ36o1A449iaXvTcVTUtCo4/oV7N9PyqDQ9Bu39AyzovhaDvQVkZ547wS6DLg8cAMR6WSM2eP79lxgvQXHVSHin/VasiGXqeMSuXVuLmf2j43Y2a+GyN67iw3ffoxz53rGDE7n7t8fS3LrhHAP65D8Z4wOdynd2zrYnlfKmF4JLerM0UKaYc3Yovemc/tpqazZ5WTa7xKYNLeI0/u3qfWzsufDRxp8jHcXrGXtnkwy+oR+hjy1ZDv/un5EyI/bWC09wxrNcPfsAAAgAElEQVRdfBljXCIyGZiH9zbt/xlj1onIo8AKY8zHwO0ici7gAnKAaxt73Ghmj2/F7lfvqPw+ki83Amz86TsWb87hor6Ax8MxHWDm8hw65nwXkcVX9YUL/ex27xS+MR5Wfz2brN+WMyTd5m2e73R2VC0RsWDlBn7dWshLS8pJjhcKSg04KhhwYEOLCC4raYY1TPX8AiL2Dm2A7H27eWVzAX3aeHAbD0d1gFdWFDAot/bPyorfdvH05xtJTq5fJuc7izl+ULplY05MTCI3a3OV/IKDGRZoV1krJr3yoyXHzc/P575zBzC4V2dL9leXlp5h4r8lPxI1p2n7JyZPZOe2zcQktqvyuj2+FQmUR+R0vV9hXg5v3HcZU8e15vFPd3D/77px6ydFXPXkOxF32THQA9eOo9U5f8LtdmOMwV1wgKLfllD0y0L6d0tn9f/+hCNMS0Q0Vlaek0vufpY3L0ykqDCPxOQ2XD7LyXv/uiO6p+xPvi16KuBDaG755e9XDcwwe3wrjrzh3xHbMgEH82t4upMHT0/iurd3k+dpzYdP31nls/LL9v389/OfSGuTzNotu+l/8f106tEnbOMOzK9Ae99+kIwevZvkjvgdG9bRZfN73DJ+qOX7ri4rz8mFdz3Dv0ZWcGyv9uSVeLjk3cLozrCOQ6DXiHplmK5wHyJOZyEdL3usxhIIu1+9A+Ij+z/D8s/eYXxf+OqXHAqLivlqXQ7j+8ZFxWXHirISXPu3ULbrN+Lad6Pj6Vexe/8mSj3uqC28wDtlP66PDXGV4K4oh4oSxvWxtZgpexVa1R+e7Vd9BiwS+fOrosTGjBX5FJVWkNa6tMZnxW4TjMfDuvKOnDDpHlpFwJUIt9tt+bI5dTEY7LbQnP/MmLuEEV3KSXG4ycpz0jE1uUVlWGT/q9/M2O32GtPIFc4cEtMic7reb+NP37F8TxHuwmyeGxPP7Z/tw5GUSsreyLzs6PF42Ld9A56iHPa+cQ+xKWnYHLG49m2kaO3XxMYnAM5wD7NRFqzcQObeUp5eUEC7BBs5JcW0b5tM130tY8pehUf1DIv05zduW/sDy758n4UFORQVl5Aa5+b5MXHc9lkJJT+sr/JZ6Z/RnoeuHMHFTy9gw5K5h3W8Tv2PoUP3/paMPfDO7UD2+FZAOQW5WWxZ/pUlx/Lbu2MbI4cmW7rP2ny5bD1rNxby0Tp/fpVgswmdW0iGafEVAv7bs+OqTR/b7XbapqVH5IKqgW7+5xt8M3Ma/fZ8wNCj2nLejg1s73MhY667M9xDq6IwL4f1iz+hZOuPDOudyrY3/8yQm6ZiO+ceXO6qV4B2vf0gJ0yayrJpt4ZptI3z8b8n89SbX+DcspxVmU6Ozkikdc+hLSK0VGgFLi8RyJ9fkXq5EWDvkvf56smrAHj90yUkZK2mf7qLSackYc+o2RzfJqkVr006hbKK8sM63sNzZtLh+ocbM+RKgXdu7925pcrlx71vP8gjN0ygS0oMnz55vSXHA5BjOtO9Y7tDb2iBUScM5KQ0Z4vNLy2+QqCuB2hHA/9SE3+7NIXi/Bwu7m+4/ou3Oe2C6yKi52vL2uXsXPIRbcXJnWMGM+C8qs3zLrchoX3XKj8Tk9iWrMKyUA/VMv7btE/vUkFuUfkhb59X6nBFc35VxCTzz49+5u4JR7Fg2ep6LWvQuX3KYR3rz/9dSEJqXyuGXUP1y48xie3oOvp6Mt//B8/N22DpsY7tnsxVZxxp6T6r0/zS4kvVg79nok28jZysPIZ0jmNMRjGL338lbLNf+3dtZ9uyL3DtXstpA9J55MahIVuVORLMmLuEkRkwf0MR08a1PuTt80q1RKdd9wA/f/EuL334RZMva5AlbTllwo2W7Ks+kvsMJb5dJ4646jFL9/vjjPu5ytI91qT5pcVXyETb7dmBNv70HT/tL+W1pRtxuItJjrdRUOrB7P4kpMWXMYY1i+ZyYO1ihnRw8JeT+9CvW90f1LSkOFa9/SAxiVVn6KK972vByg2s3VzAhD7U+/Z5pQ5XqPNr/n8epHMr96E3rId4dwU/b9jJph1Nu6xBz/hCvn1mEl3O+j09Bx1vyT6rP3Dbz9v35bX0rado5z5gyfEAhve3bsmM2mh+6VITTc6/xETHy6qendjtdornPR3R/RKB/Ldrv3Wpd0r+j29vYo+7Hdf9v1lNfulxz7ZNbPz2Q+ILMzn3+AzGn9SwR1/0uvIp8kikvLSkyusVzlyO6tk+Kvu+muVt2qBLTUSYcOXX4hf+zOSz6vdon0OJcdjo2r4Nl97zHO9e4r0xoLblJhrr21WbmZU/gCNPtraAuPV3x+NISqvxuqswi8suGMvk0zpacpw2iQn069b0xVezzS9daiJyRHO/RCD/pcfU1jG8uGAXZaWldG1d0GTLTZSXlbL2m/fJ27ySYd1b8fiYfmR0OPzVo8tLS8i47ukqr5UcyCTrq382dqhh0dJv01ahEa786j3mRmbuy7RkX5t/XMTg2B8Z18dGWqKDpxZk17rcRGN9sHwnfSfeZNn+/IzYajxqCGD7lKs5YuyNzNxuTd/Xxg8/ZM79oy3ZV100v7T4ColovuTo57/0+OZP2wOWnCgkZdkCy4ovj8fD7q0bWf/F66THlvL7kf04fnzjn6+YlhTHrqwDlByoGuYOu+Bp9N7Do6Xfpq1CJxz51blnfzr3PPwlGzLXr2Triq8QwJm1i+V528nJK2PGz3spKHTWutxEY7WOt7Pyk1cZduHNlj4xwya2oAWvTWwU7ttGzkZrVrh3lxVZsp9D0fzS4qtJ+W/RDrZOS2K8I+KXmAh08z/fAKhccqJv/xQSF/xKryHDGr3vYmcha796h/xtqzljQCp3XX+0pc9XXDbtVu+lx69eCHLpsSAql5w4aXBv9u3O5MtbuvHaj07oclyLCS0VOnkH9ga93CXGE9H5tfWHz3n1+sG+7wZis9kAeOrNL2DXjwzr15o283dyylHWFpCPXHEq9766EGOMpcVXSmoaeV+9iLu0uMrrHuNh+uP3sm/WvRYdKTTPptT80uKrSUXzqvbBBC458d7y/bSLdfHTvJmHteSEq6KcXZvWsXXRLNrHlvP
nc46g7wWjKkOyKTSXS49ZeU5mzltCx1gPry3P4+rjU7jk3ZZ1m7YKjboud0WytJ5HMvm1HxEgLyeb5288DY/HMGfhct69JIkZK/JJifXw1udL+MNFp1v2uRERbNI0GeYuLa7x36IsawcFHzzCNc8v5J7xAxjUq1OTHNtKml9eTfcvnWp2/H1fAF/9ks3U8Ykk4WTx+6/Uex95WftYMmsaS6bdRa/dn/LfG4byzI0j6d8tvUkLr+bkhVkLiDNlvDi+NXPWFSAilf0SSikYOGICx1z1MIMuu59WvY4nt6C48pFcAHPWFfDi+NbEmTJemDXf0mN7jAd3RYWl+6xLbGIb0oadz7Z9+SE7ZmNofnlF3/RLlInWRwoF4+/7+u93B7ioL+DxcEYPOx8v+LjOJSc8bjdbVi9l17K5dIwr5+FxR9OtwzmWTssfSnPq+3p//o+c3sNWeYv20OczaZeU0KL6JVRo1NVrFA2+mnoPJ/dMIqPDcSxYuYHd+8uYsiSvcomD03vYmP3Nj/z19+MsO+aVI4/gsZf/xlm3PmnZPhMTk8jN2lzjv4Xdbqckdz/p2z/jpNFDLDteU9L88tLiqwnlHdiL86N/1Xg90vslanPzP9+oXHLiltEptIm3cYl7Bwtz7Tjzc2tcety3YxObl36KPXszZw3uwmM3n0RMmB5mvWzarSSc81ey5vy7xnvuwtwwjOjwZOU5SYyzsSnPTof0Djww2rCqOdyirSLOE5Mn4jEesuc8VeV1e3wrUlJr9oFFohgbdGufiN1m4+N/TyYrz8kldz/LA6OTSIm3MdF1gJV5drLziyz7/LRpHYPN4lM6/6OGgvV9lZWU8MGCldx36UmWHrMpaH4dpMVXE4rWfom6BC45UZhzgLaOMkZ1MZVLThhjWPX1++Rt+IEhHWN49KyBZHSIjFk+j9jpUa3nC2DL89eGfjCHacbcJbSPKSW/qILXludx58jUFneLtgqN5tCzOvSqB3j/07c4tlc2vbu2r7z0mJboYG92ASkOFyM6G0s/P1M/Xc3xl99tyb6qC9b3dWDeC2TlrmuS41lN8+ug6PgERalon7IPxn/pcebPOyjKy6JdgpBTUkzi7rnEuJzEFe7kopN7c87oEeEeag02m9S47Oh/PVrM+2E9G3eX+G6Vz2XG6gocdluLm7JXoRGOtomy0hKWvvFPEuKsmSUv2r2LpNbePPJfenxj1T4O5BbQLsFGTomHQfnWLTnRvX0yKz6ehtiEktIKhl76J1ont2n0fqtfeizPzqRkywqkorTR+w4Vza+DtPhqIv4p++rsdnvUTNkHE7jkRI+ds4mngikL9jAgzcE/xnehU9oRYR5h7Tq1S8JmF1zuqguPezwmKpabyMpzciC3kOuGpfK7Y9vymzO/Rd6irUIjPzuLOHfNR/zYxNakbRPO/FxO62bjlrHHWLTHg/v5+N+TgYNLTlx9bGvO/s9OTh5iXTE56XcHj/fewnWs2b/HkuLrvikzmTz+xMrvKw5sI3ngcByJbdnz7l8bvf9QOGVIbw7syeTE/p2Z7GyZS0z4afHVRJrLyvbVeTweflm+mAXv/oeLjoilwu5hUHsbGzZvjYoHW7vchoT2Xau8FpPYlqzCsjCNqP6mzV6AlOSAOxmAq49t3SJv0Vah4TGeGvnlf70pfTT1Yf44yppHCwWTleds8iUn/I7pnc6iuf+jaGlM0PcXr1jL8p8PXjIcPmosVz74fK37C/xvUpaUSnxqJ2KS0/B4Iv9JVrrERFWR/6+ligiFeTms/WImxbt/w+bcy20ntuKmYUlc8tpOXhzfmktnFfHCrPmW3jWkDsrKczL7y6VMGZvAQ/OL+MOpbtISHS22X0I1XxNufZjVn/+LIzLaHXrjeohx2BnY4+CzD4MtOdGU+XXjmX1rfS87N48bn/34sPftPZmP/MILqi4xMWluAdcMbdOi88uS4ktERgPPAnbgZWPMk9XejwNmAMcB2cClxphtVhw7kkX7Y4XKy0r5ZeFHZG/8ke4pwkOjB9Gj4zmce9cU3t9fwcvLd1W5ZXvKewua5OzRKmlJcax6+0FiEqvelRkbnwA4wzOoepo2ewEjOpfTLj6uyu3ZQIvsl7CaZlhNYjw18sv/elNq274j+/uN5b87yy3Z3+afvmPKxBh6dEoFqHXJCavzq8LlZtJ/l3HE8N/Vuk3H4YNrfS+YwP8mxuMmv6wEd5mTGHtk9xFn5Tl5YfYCJh6pS0z4Nbr4EhE7MBUYBWQCy0XkY2PMLwGb/R7INcb0EZHLgH8Alzb22JEsWh/L4fF42LdjC7/Me522NifXjOzHsDHDq6zJVf2W7bREBzc4cvnkt6yInv1aNu1WEsY+Qtq4u2q8F8k9E/5ZrxfPdtAuJZEHRqe32Nuzm4JmWGSx2WwMPGlUo/axe+Nqtv84H0Qoyc+ussRNqPJLgPISJ4W7NwPgLCygpLi4xnbrFn1S+XW3AUdz9NmX1L5Pm4PUcd41FV2F2ZRsXkFF7m48BzZbMuam8sKsBSQ63NwwLI0jMtryQHtXi88wK2a+TgA2GWO2AIjI28AEIDC4JgAP+76eBUwRETHGRMd86WGItmUmSoudrP16Njkbf2RE/3a8ePUg2iS1qnX7wFu2K1weHO5SbjgulteaqHfCKh5PzZ4v/+uRatrsBZyZUcHRnRPYnldE7zaJLXq6vglohgURbRkWaPN3c5h2xQDf4366ER9XtecqFPnlcNj54tELcPuy5cInPqHL4FPq/JlWbdvX+X6Vnq9d62l37NnEtunI1hduaPR4m4q/1+uG42JxuEtxuT3aMoE1xVcXYGfA95lA9actV25jjHGJSD6QCmRZcPyIFA3LTLhdLvZsXc+Whe/RRoq4bdRABp47Cns9prD9U/dvrdlPQVEpuMpJihOkvJwXZs/nr9dH5uyXzbjZ+cqfgr4eibLynN6zxhg3C7e7KSg14MgkuXV8i52ubwKaYUFEQ4bVJrVbf+6euQoRKMjP5dnrT6Nt8sGTyWD5lRwvxBk3o257mi+f/5MlBdj6HVk8O28DrVsl0OuYUzl63PWN2l/gZUdXcT7OuARsdkfE5hd4Z73yCop5d62d/62sqMwvaNktE1YUX8EWSap+NlifbbwbitwE3ARw5V2PMfzciY0bXZikpKbRpUfNRsuyCFhmoiAni/ULP6B052qGD0jnnmuPoVV8bIP24b9l2z99/+4lSbjKitm2L59bvljKHy6MzNmvTu3bcuSNNR+kve6lmpciI8G02Qvo3NrDeYNT+Os56WQ5XVzSwqfrm4BlGdZc8gvCk2EetxuPp/GFRL/TxgPjAVj11SxyCoqqFF/B8ist0cG6nblMmGHd5ceNu3LoNPwKMvoMBMBV0bg+tpS0DvT0LRSd/eNceg4cQkJKasTml3/Wa0CanfGD2/jucNT8AmuKr0wgI+D7rsDuWrbJFBEHkALkBNuZMWY6MB3gpUVbmu2UfjhsXrWEzO/n0t5RzL2/O4reFzX++Yr+6fuUeBtbsoo4unMsZ2aURPTsV7TQOxxDxrIM0/xqnE/+OYlje6
Zaus8uNuiaXn0i06upLz+eemRX1nw2kz2rrFnI2ZW/j/zvvGstxthjiEtMsWS/TUXvcKydFcXXcqCviPQEdgGXAZdX2+Zj4BpgKXAR8E1z7pWIJPt2bmHrD1/g2beeMwd15O83D7P0+Yr+6fsXv8+vnLovKPVQuHZBxM5+RYOsPCdn3/YMp3Z20S4hRu9wbFqaYRGiX9c0Hr2y7r4oKzV1+0SntBQeveo0i0YLb3y+jH4jL7Bsf00pK8/JlFnzuaC/6B2OQTS6+PL1P0wG5uG9Tft/xph1IvIosMIY8zHwX+B1EdmE92zxssYeN9IlJiax5eXbgr7e1IwxrJ7/ITnrl3JUp1geHN6Pnp2b5v/ogXcONdXUvZXSkuKCTtGnJcWFYTS18y+o2iElmSN6puvdQU1IMyy4cGZYqERb+0S05Bd4Mywj0dClXZJmWBASySdvOm3fMJmbf2XLkk+Id+7ighO6M2ZYv5Ac1/+ojjuHp1Dh8rAlcx8f/FrOa+vsfPvyA/pBa6CsPCdn3fIEU86Gh+aX89613UhtbeepRS3gcUIn3xY9D9o8BM2vhlv/+v08dV3oZr78/Bl228lJbMncR/c2Nh74uoQ2A4dr+8RhaLEZ1nEI9BpRrwzTFe6jXFlJMWu/nkXB9tWc2iuJm8b1oUv70D5fsbY7h+xuF6MmP82XU6y5c6glyMpzcvbtz3BWNzftEhx6uVGpEAjWPpFb7KFwjbZPNJRmWP1o8RWFPB4POzeu47ev3qJTQgW3nN6Po887PWzjqe3Oob/P28+Ha3K0+b4BXpi1gILcbK4br4sRKhUqwdonvPlVoPnVQJph9RP5C7aoSkUFeSydNY1vnruD3js/4n83HM9zN53O0X27hHtoQNU7h7KcLuZvKuLZsQnM/mIp2flF4R5exDvUYoRKqaaj+dV4mmH1pzNfEa6irIzMDavY9u37dG7l4a9jB9Gj49mNXiKiKQRefswpLGFCH2iXIJzVzWXpwoXNkf/uRoennFm/GF2MUKkQqy2/RnV38cKs+fy8MZPp912lGVYLzbCG0eIrQuXs3836b2bhObCRc4Z04ZFJJ1u6RERTqH750f/ctOscuXwYoXc/Rgr/3Y2XDEnWxVSVCoPa8uue9i7OenkpSTFG16eqg2ZYw+hlxwhSUV7GhuXzWfTiveTOe5onRqfx2h3ncMWZgyO+8ApU28KFb32+hA079nPhvS/qNL5PVp6T8XdN5Z3Pl/Ds2ATmbyoiu8itU/VKhUlgfgGkxNs4M6OCU7vH8NE3yxh31xTNL5+sPCcX3vsiG3bsZ/aXSzXDGkBnviLAnq2/snnJpyQU7eTsIV0595aTcURRsVVdrc9Nw809U94jf/9uPYP0mTF3CVm7t9Mp3k27hAS9M0ipMAvML8CXYS4GpJcxoovhvTXbNb98ZsxdQu7endwz5T1Gdde7GxtCi68wMcbw0xfvUrh5BUd3jucf446gY2r/cA/LErXd/bgnv4LTpm7mjSs6c//Xy7lm3Cktejo6K8/JR98s46HhDp781kOH9A48MNronUFKhZE/v6BqhqXE2/hx4x6+2WT44OsfNL/ynMxZuJznJqRy3iub+ccNGfRLj9O7G+tJLzuG2Pb1K1k840lWTLudizrt443bR/Lni06kY2pyuIdmuerT9+Iq4fJBDpZsLWFkBoya/HSLnL73T9VPe38BI7qUc0wnBxP6x/Da8jydqlcqggRmWHa+kx5t7VwwIIYkKW6x+QUH1/Ia00tIiy3n8kEO5vziBNAMqyed+QqBspJiVn3+JsW71jPqiFRuP7836W0Hh3tYTS5w+t7jMRzILaB9Kxtd2xQxNCMeKWmZa+jMmLuEfbu2M3vbTv4z1k5OiYdhXYWrPshlxuoKHHabTtUrFQH8GfbGqn2V+WWzQZnLEGsra5H5BQfX8hrbO43C4lLO6eOokl+glxsPRYuvJuLxeNi0ZgXbFs+mSysXfxo1kCMuPjPcwwqpwOn7wEcQZTldXPLaTp4dm8DkL5Zy6VkncN8L7zf727iz8pxc9/fXyMvN4bTusZSXlXBc706VYTU5q5k/ekOpKOPPsGD59ejpsS0uv25+8g2e+MMFVdby6pnRgb52m+ZXA2nxZbGCnCzWffkWJXs3ccFxnXnoxhNI8q1z0pLVtYZOS2nC9zfXd0yOYd1eN7/ud/HJvw+ugwN6tqhUJNL8qtpcH4eu5dVYWnxZoKy0hG1rlpP5/Uf0aufg0TGD6Zp+TriHFVFqW0PnznYHm/D/PO8H5q/cyKsPXdtsziADzxYPNteX8f713THG6Do4SkWB+uTX/V8vZ9xpRzerWbDA/Apsrv/U11yva3kdPm24P0wej4fMzb/yxQv38+ubf+VUzw+89ceRPH7NcLqmtw338CJWXU347WNKydq9vVk1agaeLWpzvVLRra78GtfHxj1T3iN3785m85kOzK9xfWzaXG8hnflqoOLCAtZ+9Q7529Zw1hGp/Pn3x+llxQaorQm/faKT0go3Dw2P5cn5y6L6DLK2s8VJ41qRU4I21ysVpeq6iehf4zvwwreb+ei6rtz2WXTPgmXlObn2769SmJ/H1IClJAqL87W53iJafNWDq6KcXRvXsnXxbNrHlvGX0UfS58JR2Gw6cdhQtTXhP7UgG8qLOaaTgxGdy6v0UVz9u5O5+ck3IjrE/AXX9PuuqvVscW1OHGcPSQW0uV6paFRbfgE8tSCbywc5SI0tr5wFi+YM27xlOxcPTiAt1lE523XnyHT6ovllBS2+6pB7YC/rF7yPe8+vnDG4I3+9cShxsTHhHlaz4T+LnPHzXgoKnbx+fjw5JR7G9rbx1uyDfRRFZeWVU/mR+mH3F1wvzJrP/B9W8fjotlz51mamTe5OYX6uni0q1cwEzoK53J7KDCssLmXiUW15YWr0Zti8734iNcEwtqebgiJdSqIpaPFVjcftZsuqJexe/ikd48p5ZNzRdOuozfNNofpt3Kcc4T2D3JtdwOWDSn2LsdqY/cVS3rk8jUlzImsqv/rlxWkXpHHpW0u5cFArlmzz9oJQUULfDD1bVKq5CTYLVjXDHFGbYV0TDWcMiKFHWzvExNOvW7rml8W0+PLZu30jW7//DFvOZkYN7sL/3XxSVD9fMZocajHWMzMq6J0aExFT+XVdXuzVLoYzMyrAXcGCbaVk5rl55ed9tG9bgs0mgJ4tKtUcNbcM+2Kzm7cK4I3V5RwoLqzMMM0v6zSq+BKRdsA7QA9gG3CJMSY3yHZuYI3v2x3GmHMbc1yrGGP46atZFGxcztGdYnl01AC6pvcO97BanEMtxvr8mDiy8pxMPKpV1an80oNT+dVDLDBgDifUAn/eGFMjrKa+N58Fy1ZVea5Zdr6T64+J5bbPinjv2m6ktrbz1CI9W4xk0Z5hKjJYnWGBmWNlfqWmtK5Xhv20t0wzrIk1tmP8XuBrY0xf4Gvf98GUGGOO9v0Je2jt+G0Ni1//Jz++eAcXpO/mzdtH8JeLT9IlIiLAgpUbeGtNGcdP3c8JU3ZxVAcQgcLi0iq3dY/pBW99voRpF
6R5p8vfX1DlFm9/wLw257vKZylm5xfV+jVQ5fvAnw/shfBPzc+ct6TGc80Ki0sRgaM6wNDnMzl+6n7eWlPGgpUbwvlXquoWlRmmIpcVGVaf/AJqfS9Yfvn3pRkWGRp72XECMNL39WvAAuCeRu6zSZSVFLP6q3co2rGO0/ul8IfxvemUdmS4h6WqCTyDPPeuKSzel8XiT/FN5e+rnMp/4pwk3lxRRlprByMzqNFT4Q+YSXOqNrsCQb++84qza5wRTrsgjd9/+AN2mzA9oJ8rtZWdOFPG2N6tqz3XLBGH3QE4GNQ7rcrvoiJW1GSYig6NzbCbPlqGxxhePkR+BWZW4Hu15dekOd6ZtnF9bJphEaCxxVcHY8weAGPMHhFJr2W7eBFZAbiAJ40xHzbyuPXi8XjYtn4Vm+a/Q+dWLm4/qz+Dzj89FIdWFqhtKr/C5WFL5j6uGOxdrFQ87ho9FeP62OifHsfIjOKgoVY94AILtvNeX8L1xyXSPz2O9jFZDO5gp1e7NpX9XDNW5HPF4Bh9rlnzENEZpqLb4WTYiC7lrNnnpn96aq35VT2zAt8Lll/909sxplcx//t8CYtv7qgZFgEOWXyJyFdAxyBvPdCA43QzxuwWkV7ANyKyxhizuZbj3QTcBHDlXY8x/NyJDTiMV2FeDmvmvUXJng2MO7oDD9x0Aomt4hq8HxU5AhtaC4pKwVVOcryQ2spJhdtTo6fiHzdkAMefBN4AACAASURBVCCeiqChNqJLbuXX4/qUVhZsgWeEWU4XOc4yxp4Yx4G8wsp+rli7jaxiFy/9WK7PNYsCocwwK/JLNU/1ybD9OYWM7elm4eYysovcteZXYGb1T4+rzLPUVilB88vl9jC2N7y5ogwRYcGmInYXVGiGhdEhiy9jzFm1vSci+0Skk++MsROwv5Z97Pb97xYRWQAcAwQtvowx04HpAC8t2mIO+Rv4lJeVsuOXlexY8iHdUoSHRw+iuy4R0WxUn8rfvT8LgI153ofc+nsqklpR2b9wdSs78zcVeUMt92Co7c2vqBJwgQVb4BnhK8tcTOjvvd06u6C4shfio03QLimJ5DjonK5T85EulBl2uPmlmr/6ZFhuYTE92tqZ0D+Gqd/msCBIflXPrAqXp/K9aUtyg+ZXVp6TWCo4vYeNoc9n0i4pAeLiNcPCSIw5/HwQkX8B2caYJ0XkXqCdMebuatu0BYqNMWUikgYsBSYYY3451P7rE15ZezJZ//U72PK2MfaYDC48dSB2u64831IEhhjA/txC3G4PHmwkxDmY0MfDjcfFYYyQ2kp4a3UFhS4bNx9r563VFRDbissHx/DCdzm0SU6qPCMsKDXklhpifauN2O020tsmARpWTerk2ySUh2vKDNPiq+HWv34/T113SriHEVKBGebPL4Ayl+Gqo2Jq5Ff1zLp8cAxUFPPW6gpmrCrHYUfzK1w6DoFeI+qVYY0tvlKBd4FuwA7gYmNMjogcD9xijLlBRE4G/gN48N5d+Ywx5r/12X9t4VVWWsK2n79l309f0T3FcMe5x5HWJvGwfw/VPNUWaqUuD/EOb4HuwUaMncqCrUtaUuXPa0iFQeiLrybLMC2+Gq4lFl+1qS2/qmeW/2v/e/4M0/wKg1AVX02tenhlblzD1u8/I7lsH+cc3ZWxQ/voLJdSzUmIi6+mpMVXw2nxpaJaA4qviF/h3uWqYNWX71G09SeOy0jg9gsGkZqiS0QopZRSKjpFdPG16NXHSSrbx8RT+zJi/MhwD0cppZRSqtEiuvh6+qLepLU5KtzDUEoppZSyTEQ3TGkTvVJKKaWam4guvpRSSimlmhstvpRSSimlQkiLL6WUUkqpENLiSymllFIqhLT4UkoppZQKIS2+lFJKKaVCSIsvpZRSSqkQ0uJLKaWUUiqEtPhSSimllAohLb6UUkoppUJIiy+llFJKqRDS4ksppZRSKoS0+FJKKaWUCiEtvpRSSimlQkiLL6WUUkqpENLiSymllFIqhBpVfInIxSKyTkQ8InJ8HduNFpHfRGSTiNzbmGMqpZRVNMOUUuHQ2JmvtcAFwKLaNhAROzAVGAMcAUwUkSMaeVyllLKCZphSKuQcjflhY8x6ABGpa7MTgE3GmC2+bd8GJgC/NObYSinVWJphSqlwCEXPVxdgZ8D3mb7XghKRm0RkhYismP7Rd00+OKWUOoR6Z1hgfi36eGZIBqeUij6HnPkSka+AjkHeesAY81E9jhHslNLUtrExZjowHYAlz9e6nVJK1UcoMywwv15atEXzSykV1CGLL2PMWY08RiaQEfB9V2B3I/eplFL1ohmmlIo0objsuBzoKyI9RSQWuAz4OATHVUopK2iGKaUs1dilJs4XkUzgJGCuiMzzvd5ZRD4FMMa4gMnAPGA98K4xZl3jhq2UUo2nGaaUCofG3u34AfBBkNd3A2MDvv8U+LQxx1JKKatphimlwkFXuFdKKaWUCiEtvpRSSimlQkiLL6WUUkqpENLiSymllFIqhLT4UkoppZQKIS2+lFJKKaVCSIsvpZRSSqkQ0uJLKaWUUiqEtPhSSimllAohLb6UUkoppUJIiy+llFJKqRDS4ksppZRSKoS0+FJKKaWUCiEtvpRSSimlQkiLL6WUUkqpENLiSymllFIqhLT4UkoppZQKIS2+lFJKKaVCSIsvpZRSSqkQalTxJSIXi8g6EfGIyPF1bLdNRNaIyM8isqIxx1RKKatohimlwsHRyJ9fC1wA/Kce255ujMlq5PGUUspKmmFKqZBrVPFljFkPICLWjEYppUJIM0wpFQ6NnfmqLwN8ISIG+I8xZnq9fqrDkU06KKWUqqcGZ9iATklNP6pmpvWwkzX3/z979x0fVZX+cfxzMjMpkAIhgdCRqjQRsWABFBuIYlsV7Kur6wquba2ru7r+Vndd3VXBvoqIYG8LIqICoihVAQHpLRBKeiZ1yvn9MTNhkkwgyZR7Z+Z5v155mUwmc58B8+Xcc597johe6Z2a/NQjDr6UUl8BOQG+9ZDW+tMmHudUrfVepVR7YL5S6let9beNHO9m4GbvlzO01tc08RimpZS6uckDTpOT92I+sfI+IDzvJZIZFov5BRH8f6zP7WF9efldMad4fC9Kax2Kgy0E7tFaH7ERVSn1V8Cutf5XE567QmvdaBNstIiV9wHyXswoVt4HGPdewpFh8vdiPrHyPkDei1k19b2EfakJpVRrpVSa73PgHDxNrkIIYXqSYUKIUAt2qYmLlVK5wHBgjlJqnvfxTkqpz71P6wB8p5RaDSwD5mitvwjmuEIIEQqSYUIIIwR7t+PHwMcBHt8LjPV+vg04toWHiIlrwMTO+wB5L2YUK+8DIvxewpxh8vdiPrHyPkDei1k16b2EpOdLCCGEEEI0jWwvJIQQQggRQaYefCmlnlJK/aqUWqOU+lgp1cbomlqqqduYmJlS6jyl1Eal1Bal1P1G19NSSqnXlVIHlFJR3TStlOqqlFqglNrg/X/rj0bX1FJKqWSl1DKl1Grve3nU6JpCIVYyTPLLPCS/zKcl+WXqwRcwHxiotR4MbAIe
MLieYPi2MQm4vpnZKaUswFRgDNAfmKCU6m9sVS02DTjP6CJCwAncrbU+BjgZuC2K/06qgTO11scCQ4DzlFInG1xTKMRKhkl+mcc0JL/Mptn5ZerBl9b6S6210/vlj0AXI+sJhtZ6g9Z6o9F1BOFEYIvWepvWugZ4BxhvcE0t4l0cs9DoOoKltc7TWq/yfl4GbAA6G1tVy2gPu/dLm/cj6htSYyXDJL/MQ/LLfFqSX6YefNXzW2Cu0UXEsc7Abr+vc4nSX5RYpJTqARwHLDW2kpZTSlmUUj8DB4D5WuuofS+NkAwzjuSXicVjfkVqb8dGNWXrD6XUQ3imKN+OZG3NFaJtTMwq0M7DUT8zEQuUUqnAh8AdWutSo+tpKa21Cxji7Yv6WCk1UGtt+r6WWMkwyS9hhHjNL8MHX1rrsw73faXUdcA4YLQ2+boYR3ovUS4X6Or3dRdgr0G1CC+llA1PcL2ttf7I6HpCQWtd7N3u5zyiYCX5WMkwyS8RafGcX6a+7KiUOg+4D7hQa11hdD1xbjnQRyl1lFIqEbgS+MzgmuKaUkoB/wU2aK2fMbqeYCilsn13AiqlUoCzgF+NrSp4kmGmIfllMvGeX6YefAFTgDRgvlLqZ6XUS0YX1FKNbWMSLbxNw5OAeXgaI9/TWq8ztqqWUUrNAn4A+imlcpVSNxpdUwudClwDnOn9/fhZKTXW6KJaqCOwQCm1Bs8/lPO11rMNrikUYiLDJL/MQ/LLlJqdX7LCvRBCCCFEBJl95ksIIYQQIqbI4EsIIYQQIoJk8CWEEEIIEUEy+BJCCCGEiCAZfAkhhBBCRJAMvoQQQgghIkgGX0IIIYQQESSDLxE0pdSDSqnXjK5DCCGEiAYy+BIopXYopfYrpVr7PXaTd3+qI9Ja/11rfVMY6lqolKpSStmVUiVKqW+VUoNCfRwhhDgSpdRpSqkl3iwqVEp9r5Q6XSlVrpRKC/D8n5RSk5RSPZRSWim1qt73s5RSNUqpHRF7E8I0ZPAlfKzAH40uIoBJWutUoB2wEHjL2HKEEPFGKZUOzAaeBzKBzsCjQAmeTbsvrff8gUB/YJbfw629j/tMBLaHsWxhYjL4Ej5PAff4NgetTyn1rFJqt1KqVCm1Uil1ut/3/qqUmuH9/Aul1KR6P7taKXWJ9/OjlVLzvWeOG5VSlzelOO/ebO/gCTTf656olPpBKVWslMpTSk3xbpqLUmqqUurpenX8Tyl1h/fzTkqpD5VSB5VS25VSt9d73RXe97pfKRXVm74KIYLWF0BrPUtr7dJaV2qtv9RarwHeBK6t9/xrgTla6wK/x94Crqv3nOnhLFqYlwy+hM8KPDNL9zTy/eXAEDxnfTOB95VSyQGeNxOY4PtCKdUf6I5nM97WwHzvc9p7n/eCUmrAkYrzDqquAn70e9gF3Alk4dnwdzTwB+/33gQmKKUSvD+f5f3+LO9j/wNW4zmDHQ3coZQ61/uzzwLPaq3TgV7Ae0eqTwgR0zYBLqXUm0qpMUqptn7fews4XSnVDcCbLxNpOLCaAVyplLIopY7Bs+H60gjULkxIBl/C3yPAZKVUdv1vaK1naK0LtNZOrfXTQBLQL8BrfAwMUUp19359FfCR1roaGAfs0Fq/4X2dVcCHwGWHqek5pVQxYAcm4Znq99W0Umv9o/e1dgAvAyO931uG55LAaO/TrwQWaq33AycA2Vrrx7TWNVrrbcCr3ucAOIDeSqksrbVda+0/4BNCxBmtdSlwGqDxZMVBpdRnSqkOWuvdwCLgau/TRwPJwJx6L5MLbATOwjMDJrNecUwGX6KW1voXPH0N99f/nlLqbqXUBm+zaTGQgWfGqf5rlOEJHd9A5krgbe/n3YGTvJcJi72vcxWQc5iybtdat8ETZuOAD5RSg7019VVKzVZK7VNKlQJ/r1fTmxwKxKs51C/WHehUr44HgQ7e79+I5zLDr0qp5UqpcYepTwgRB7TWG7TW12utuwADgU7Af7zf9r/0eA0wU2vtCPAy04Hr8cz6zwhvxcLMZPAl6vsL8Ds8l+MA8PZ33QdcDrT1DoZKANXIa8zCc8lvOJACLPA+vhtYpLVu4/eRqrW+9UhFaa3dWuvFwBbgHO/DLwK/An28lwgfrFfTDGC8UupY4BjgE786tterI01rPdZ7rM1a6wl4Lo3+A8+ArzVCCAForX8FpuEZhAF8BHRWSp0BXELjs1ofAucD27TWO8NdpzAvGXyJOrTWW4B3gdv9Hk4DnMBBwKqUegRIP8zLfI5ndukx4F2ttdv7+Gygr1LqGqWUzftxgrf/4Yi8g7n+wDq/ukoBu1LqaKDOIE5rnYunV+0t4EOtdaX3W8uAUqXUfUqpFG8PxkCl1Ane41ytlMr21l3s/RlXU2oUQsQe741Cdyuluni/7opn9upHAK11OfAB8AawU2u9ItDreJ93JhDypXlEdJHBlwjkMcB/pmceMBdP0+lOoArP7FFA3v6uj/D0Nsz0e7wMz6zVlcBeYB+emaWkw9QyxbvOlx3PIOrPWuu53u/dg6extQxPH8a7AX7+TWAQfktUaK1dwAV4biDYDuQDr+G5lApwHrDOe8xngSu11lWHqVEIEdvKgJOApUqpcjyDrl+Au/2e8yaek87D9nJprVdorbeGq1ARHZTW2ugahAgbpdQIPJcfe/jNwAkhhBCGkZkvEbOUUjY8C8e+JgMvIYQQZhH04Esp1VUptcB7J9w6pVSDVdKVx3NKqS1KqTVKqaHBHleIw/H2kRUDHTl0R5IQDUiGCSEizRqC13ACd2utVynP/lYrlVLztdbr/Z4zBujj/TgJz11qJ4Xg2EIEpLXeQN2+NSEaIxkmhIiooGe+tNZ53sUyfQ3VG/BbpsBrPDBde/wItFFKdQz22EIIESzJMCFEpIW050sp1QM4joZbJnSm7t1xuTQMNyGEMJRkmBAiEkJx2REApVQqngXk7vBuxVDn2wF+JOBtlkqpm4GbAW68/4njR188MVQlCiFMbsKJ3RpbuDfsQpFhkl9CHNnSj17lH+O7kZRoM7qU0Mo+Brqd1KQMC8ngy3tX2YfA21rrjwI8JRfo6vd1FzzrPDWgtX4FeAXg1W+3aXuVMxQlCiFEo0KVYZJfQhxZSUkxVLcFHWODL2fTl4MMxd2OCvgvsEFr/UwjT/sMuNZ7x9DJQInWOi/YYwshRLAkw4QQkRaKma9T8WwkulYp9bP3sQeBbgBa65fwbDczFs++fBXADSE4rhBChIJkmBAiooIefGmtv6PxDZZ9z9HAbcEeSwghQk0yTIjIqi7Mw2btbXQZhgpZw32kKDQZNjfJFvBcLTAXrTVVLihxJKAPn+dCiDgj+SUEZKcnkZAQ3xvsRN3gK8Pmpk3rZNzKCiYML7QmWTuhvIpih8XoaoQQJiL5JYSAKNzbMdmCeYMLQCncykqy5JYQoh7JLyEEROHgSyll3uDyUcqUlxSEEMaS/BICtNttdAmGi7rBlxms+O4bbrzgNG4YO5x3X3ve6HKEEKJZJMOEUQ7s2UnnNKOrMJ4MvprJ5XIx9f8e5PEX3uaVTxexcO4
n7Ny60eiyhBCiSSTDhJFqqqro0T7d6DIMJ4OvZtq49ic6dutBx67dsdkSGTlmPD8smGd0WUII0SSSYUIYL+rudmyOP157MSWl9bdog4z0dJ6d/nGLXrPgwD6ycw7tp5vVoSMb1/zU4hqFECKQcOQXSIYJYQYxPfgqKS2lz81TGjy++ZVJLX5Nz1qLdUlzqhAi1MKRXyAZJoxVvHcLmekpRpdhOLns2ExZHTpycN+e2q/z9+eR2b6DgRUJIUTTSYYJIxVvW8PZx/cyugzDyeCrmfoNHMLendvZl7sLh6OGRXM/5eRR5xpdlhBCNIlkmDCSTLJ6xPRlx3CwWK384cG/89DvJ+B2uTjn4ivp0buf0WUJIUSTSIYJYTwZfLXAiSNGc+KI0UaXIYQQLSIZJozidskCqxDjg6+M9PSAzakZ6bLGiBDC3CS/RCyyVOQbXYIpxPTgK5jbsYUQwkiSXyIWtZU7HQFpuBdCCCGEiCgZfAkhhBAi7JyOGhwOh9FlmIIMvoQQQggRdts3rOGs/tlGl2EKMvgSQgghRESkJMZ0q3mTyeCrBZ55+E6uGDmQWy4eZXQpQgjRLJJfQhgvJIMvpdTrSqkDSqlfGvn+KKVUiVLqZ+/HI6E4rlHOHn85j7840+gyhBAhIPklRGSU7dtORmqy0WWYQqhmvqYB5x3hOYu11kO8H4+F6LiGGDRsOGkZbY0uQwgRGtOQ/BIi7KrzNnFS/+5Gl2EKIRl8aa2/BQpD8VrhUFJUwP/dfjWlxaYtUQhhEMkvISJD9nU8JJI9X8OVUquVUnOVUgMae5JS6mal1Aql1IpvP5sVkgN/88nbuPeu5uuPZ4Tk9YQQcUfyS4gguWRroVqRGnytArprrY8Fngc+aeyJWutXtNbDtNbDRlw4IegDlxQV8NP8D/jPJV34af4HcvYohGguyS8hQkC2FjokIoMvrXWp1tru/fxzwKaUyorEsb/55G0u6A19OqRwQW/k7FEI0SySX0KERqZsLVQrIoMvpVSOUp6rvUqpE73HLQj3cX1njROPzwBg4vEZITl7fOLeW7nz6nHk7tjK1aOH8sVHcueQELFK8kuI0NBaG12CaYRktTOl1CxgFJCllMoF/gLYALTWLwGXAbcqpZxAJXCljsDfgu+ssV2qDfD813f2ePENt7f4dR/454uhKlEIYTDJLyHCb9vaZQzp1sboMkwjJIMvrfVhmxu01lOAKaE4VnOsXbaYxXlVzFqTW+fxNgcXBxVeQojYIfklRPhVlZfSr3OG0WWYRkyv8//Ii+8bXYIQQrSI5JcQsUu2FxJCCCFEWBVtXkmPnEyjyzCNqBt8aa3B7E17WktjoRCiAckvEa8SdQ3ZbdOMLsM0om7wVeWCBO00b4BpTYJ2UuUyuhAhhNlIfol45XTJ/1T+oq7nq8SRAOVVJFtAmXCvAq01VS5vnUII4UfyS8SrxKqwr84SVaJu8KVRFDss4DC6EiGEaB7JLxGvMlonG12CqcjpjRBCCCHCpqy4gESrDDf8yZ+GEEIIIcJm28qFTDi9j9FlmIoMvoQQQggRPlpjtViMrsJUZPAlhBBCiLAp2b6GLu1layF/MvgSQgghRNikJltolZxodBmmIoMvIYQQQoSFy+mkyl5idBmmI4MvIYQQQoRFdVUlfdu3MroM05HBlxBCCCHCwuWsARMuKGw0GXwJIYQQIiw2fTeHi07sYXQZpiODLyGEEEKEhctRTWa6XHasTwZfQgghhAiL6v2byW6TanQZpiODLyGEEEKERVqrZBISZKhRn/yJCCGEECLkSgvzaWutMroMUwrJ4Esp9bpS6oBS6pdGvq+UUs8ppbYopdYopYaG4rhCCBEsyS8hwsNRU02XrDSjyzAla4heZxowBZjeyPfHAH28HycBL3r/K0zgiUkTsNvLACgpyMet3QC4HdUk2JIASFCecbpbu1HajUqwNvq8jHZZAKSmpvHAlFkRfS9CtMA0JL+iln9+waEMU9580t7s0m4XKsGzv6DbUUOCLdH7ueRXuGxf/iWTh3QyugxTCsngS2v9rVKqx2GeMh6YrrXWwI9KqTZKqY5a67xQHF80n39gFeUfIOfKxwFo53TSKqcnADuev5bukzz/HlXn7wIgKasbe6fdQbtxd5GU1a3B8yr2bcVitQGw+50/89D14wAJMmFekl/Rp7H8slgsJLlctTkF0On6/wBQsW9bbbbteu02ut00FWiYcxaLBZfLVSe/QDKsJcry8+jTub/RZZhSqGa+jqQzsNvv61zvYxJeEVQ/sGypmQBoVO1AqmLftiCPcui1NAp7ldN7vK0yEBPRSvLLBJqSX76TxGC4vIM3W2omlSTiqqrwHlMyrLlSnSUkJdqMLsOUIjX4CrS8rQ74RKVuBm4GuPruxxlx4YRw1hXzGgssS1o2na57GvCc+YWF1rVnnXJGKaKY5JdBDM0vwFVVUWfmzGL1/JMps/pHVlVhJ9niMroM04rU4CsX6Or3dRdgb6Anaq1fAV4BePXbbQEDTjSufv+Db0refzoePNPukSZnlCJKSX5FUGOXFLOdDlrl9AKMyS/g0Ky+2yWz+kdQsG8Pw/tkG12GaUVq8PUZMEkp9Q6eRtUS6ZcID7u9jJ43PV/79Zopt5KU1S0k0/Gh5H9GWZ2/i849+gCw7bXJRpYlRCCSXxHkn2G+/IJQtESEkLJIfh1B3rolXHpsltFlmFZIBl9KqVnAKCBLKZUL/AWwAWitXwI+B8YCW4AK4IZQHFcc4jtbLMo/wJopt9Y+XlN68Mg/rF21zak1JQdRFs8dQdpZzc4pnin9+nc7Hnj3kdq7Hf2fp91uEtOzal/3SPa8/QCuylIKvE36DnshD10/Ts4gRcRIfhmv/myXL8Oam18OeyEJKqHO3Y6Hsqnu3Y61j9fLObd2Y0vNxJLcqnZ2/nAOznmWAm/W+fILZBasIn8Pg3oeZ3QZphWqux0P29jgvUvImHniOOE7W9yzY3PtmSI03g+hlKqdDVMJFlKTPf8rpGb1DSow6oSo3VJ7DF+/V33umgo6XvsMtkTPrd6+s0g5g4wfjppqcn/9iZ0rvuJ3I16P+PElv4znP9vln2HNz69eQQ94DmVYDUX2Qr+rBoGvIrsdVXTx3jkps2CHWCsKjC7B1CJ12VGEQf2zxT07NuNyOqnKzyU5q0uD51sshwZD7spSqr96DoCuPYIPLB//13li0gTs3mP4ajzcGaWzphqX08meHZspyj8gZ5AxrLK8jA2LZ1Oxcw3JqoaRR7fn/quONrosEUGB8gvA5XQ0+jPhzi9oPMP8G/5Rge7BoDa/fM+P1wyrrqokvZUMLw5H/nSiWKDeCEdNNc7i/YeepBR7p92Bw15I26z2tQ+HOrACqf/6gc4otctZeyuZBlSCtbYp3/fe4v0MMhZordmz5Rd2LvsSS1UhmZYqbh7Rl0HnDcdqtRhdnjBAk3q7vPkF1MmwSOQXBBiI1c7q60Mzb/UGYr73Ec8ZtmvTOs4Z2P7IT4xjMviKMo32RtiLAM898drtPBQMaFKTrSGZjg9WoDNKXVWGq3
Q/LjxnjQlWz6rTNfai2vcmfRTRyemoYf2SeZRsXkZCdRnDe7fjDxf0pkOmzHDFqyPll0/9/ILQXFIMRmMzYu7K0gaXJqvyc6kpK2yQYfGSX/tXf82JF3U3ugxTk8FXlDlSb4Q1MQmX1Vrbd1Cd1Z7/mzbbmGIPwxdAD10/rrZWz/vxXi6tt0aY9FFEh307NrN3y1pKNi+jnbWKy0/pyckjT8Ams1uCpvV2WaIov6BhhoHnxiNLatsGGRYv+eUo3k9224FGl2FqMviKAo33RjgPPaneHT/V3un51FRzb2qamppWG0h1eir87pSUPgrzcjpq2LJ2JUWbluIs2MnQbhncdEwHhp57WoPLMSJ++d+N7Z9fzppqrIlJUZtfEDjDasoKsWV2BuKvl9VRU01OmpxsHYkMvqJAU3ojEtOzGTzpRcAzO2TGs8VA6p9B+r9Pf9JHYR75ebvJ27qe/WsW0M5WzQXHdWLwuZ3onN3H6NKESfkyzD+/Kg/sqr1/MFrzCwJn2Jopt9LpqieA+Otl3bzqey44TjbTPhIZfJlYoLW7asoK2fP2A3S+6gngUNOnw15Y+8scDWeLgfifQToC3OK95+0HcMRxH4WR9uduZ/uPc6nZv5X+OSlc1CebMybL7JZoXKD+Lv/8UgkJOAr34LZaYyK/4FCG+eeXr5c1XvKrcPNyBkm/1xHJ4MvEAq3dVXlgF/lzPHuaWaw20/dGNMeR+ijcNRV0uOJxUtof2kQ3nvooIqm6qpIdvyxn57IvyGml6d7GykMj+tCz05lGlyaiRKD+Lv/8Ss7qUvs7HAv5BYfvZY2X/HKWSL9XU8jgK8qohARc9qLa5SOiqTeiOQL1UbjsRaiEBIMri13F+fvZvGQO5bs30DZZM3pAB56afIrMbomQ8c8vONTfJfkVG1xOJ52k36tJZPBlQvWbU11OJ5UHdqESEkjO6kJiWiaDJ70Ydb0RzdFYH0VyVhecNdVoaNDEGmvT9+HmqKlmz6a1Q09W6QAAIABJREFUbPtxLm2tVbRPcXPHaX04+qIRJMT4PxIifBprrvct/uzLL4i+/q6mOlx+Qew24a9bPJvfDOt25CcKGXyZUf3mVN9gw1G4h+r8XbX9EbF2ttiY+n0ULqcTlWAlwZpIUlaX2ibWWJu+D4cKexmblsylJHcjrZzFnNanHQ9eN4DWKUlGlyZiRKDmemdNNTX18gtib8Y+kMP1gfnnF0R/E37J5uUMP+Mko8uICjL4MolAy0k4nY5Dt2IDbu/6N7HSH9FU9fso/Hvg/BczlMVYG3K7XOzfuYk965ZRsWc9HZKdXHdqH44Z2Z+01slGlydiSP0ZL6fTgaOmGkXd9QfjPb/gUB9YLC3G6nTUkOSulDX9mkgGXyYRaDkJS0o6e6ffhdVqA2K3P6KpfGeQ/uuB+dbT6XTVE7IYq5fb5WL9ki8o3rmehLI8hvXI4PrhPemcfRoWi1xOFOFRf8bLkpLOvrfvRbucWK02ya/DrAfmW5Yimpvwd61bzugBOUaXETVk8GVinQMMKOLpjLE+/zNI/4GqL7jiWU11Feu//4LizctI13YuP7U3A4Z0pX3b/kaXJuJU5wADCskvj0DrgUW73Su/4sGr5S7HppLBl8GOvJaXaIo9bz+Aq7KUAr9Zwmidvm+qvds2smvFl9Qc2EZOuo2rT+zGCWeeLBtVi4iqn2E1ZYXsfuOPJCS2kgxroj1vP4C7pgLtclLgnSWMpvyqrqok3V0qrQzNIIMvgwVay6sqP5d9sx6M+eUkWirQYqyuylI6XftMbX9cNE/fN8ZRXc2GlYspWLuQDMrp36UNN5zZk245Zxldmohj9TOsKj8X7Xaz/90/18kwya9DGtxEVFlKx2ufqe2Pi7b82rn2R84eLKvaN4cMvkwoXpaTaKlAi7EWWG21A69YciB3B9uXz6dyzwayW1sYP6QjI285UZpahWn5llOQDGtc/Sb8AqsNWxTn164VX/H3W+Uux+aQwZeBnpg0oc5aXof2atRxt5xESwVahsJDR+UaYI6aarb9spIDqxfgLi9gcJfW3HdyT/p0HW10aULUEWg9Qk+GaSzeS2eSYYcXC/lVVWGnfWK1nBA2kwy+DGS3l2FLzay93OhTnb+LtnF2O3ZLHW4ZCiAq1gArOriPbUu/oGDXJrJsVZw9oAMX3DBIwkyYWqD1vICY2zIonGIhv7b9/D1jj+tsdBlRJySDL6XUecCzgAV4TWv9ZL3vXw88BezxPjRFa/1aKI4tRLRxOh3kbl7PnlVf4SjOo0+WjT8M783gi041urS4JRkmRMvkrvyaMXeONLqMqBP04EspZQGmAmcDucBypdRnWuv19Z76rtZ6UrDHiwX+0/WWtGx2vXYbAEopLBYrDnshXXv0MrjKhsqKC3nnqT8x4d5/kZrR1uhy6vBfAywhJR2tNQAJtuTa6fsnJk0wbOq+vKyEAzs2sH3pPFJdpZzaO5MHLz+GVsn9gto7Mb/Yzi1PzuCVB66hXUbrEFYcPyTDmqf+5Ua3srDrtdvq5JcZG+yjJb9sqZm4XE601nXyy4yXHw/u3cVxHW1BvUa8ZlgoZr5OBLZorbcBKKXeAcYD9YNLePlP13e67unax/3vcDHTL5jP8rnvYt2/lmWfv8OZE241upw6/Kfvk866vcGlXFtqZu0OApHgdrkoLythy4/zKN2xmo4pLo47Kos/Xz+YVsmJITvO9DlLKNq3mzdnf89dV50TsteNM5JhzVD/cmMXb4aZfT2vaMmv+ne/g3kvP2794XPuO6V3UK8RrxkWiuWuOwO7/b7O9T5W36VKqTVKqQ+UUl1DcFwRQWXFhWz89mOevrgzG7/9GHtJkdElmY6jpprczWtZ/NZTLH3lXhzfPMNNfcuY9vtTeOrGUUw8c2BIB175xXZmL1rOi5dkMXvRcgpKykP22nFGMizGSX6FhztvPX27tW/xz8dzhoVi5ivQNRNd7+v/AbO01tVKqd8DbwJnBnwxpW4Gbga4+u7HGXHhhBCUaE6W5FbsnXZH7ddmna4Hz1nj6G4u/v75LkZ3b2PKs0fwTN/vfufPtdsP+ViSWwE1IT2W1hqXy8mG7z6ncNNyMrAzuHs7Jl3Uh+wIrCw/fc4SxvQES00pY3ra4u7MMYRClmGSX5JfwQi0hRr48stc1i7+nHHHB3cOEs8Zpny9MS1+AaWGA3/VWp/r/foBAK11wKWNvf0VhVrrjCO99qvfbguuOJPx75XIufLx2sctFgs5XXuadrq+rLiQGQ9cybldKvlhcz7D+2QxLzeFa55813S9E3Bo6n7da3fjqqqofdxhL6St9x+HYC7r7t+1hW1L5+E4sJVO6RbGHd+dE4/uQlJicL0PzZFfbOfye5/l+bPB6qzAaW3F5Pnw/lN3RHffxCmTW94A10LhyrBYyy/wZNjuHVvr5Bd4Mqxi3r9NkV9bf/6eyrLi2q8ry8v5+q3/cGy7GjbllTG8d1tWFqebNr/Ak2GVJNbJL6C2H9gMbSlf/2cys+4+t8U9qzGZYTmDoefIJv2BhGLmaznQRyl1FJ47ga4EJ
vo/QSnVUWud5/3yQmBDCI4bdQ53a7aZ+c4al2wqYuq4VG6bU8TofommPXv0cVVV0On6/9R+3dJVo90uFxuWL6Jw/WKoLGZI1zT+ckYfuucYt/aW74zR6qqie1srO4urGNMzJa7OHENIMqyJDrc8jlF2b/6F3FVf48zfQYc0G8N6tuOYrocGVdPn/MC53R2s2GHn76NT+NOXRXRoW8mMh69j8Mkj6XTcaHocM8Sw+htTP7/A8+ds/+o5gyo6ZOOyb5hwSvegbhaK9wwLevCltXYqpSYB8/Dcpv261nqdUuoxYIXW+jPgdqXUhYATKASuD/a40SyapusBNv/0PYu3FnJZH8Dt5rgOMGt5ITmF35ty8FV/4UIfi6Xp62bl5+1m+/L5VB3YSStXCeOHdWXUdYNJSQpdz1YwFq7axK/by3h1SQ3pyYrSKg1WB0cf3BQXwRVKkmHNUz+/gIjfoe2oqWb1l+9SvXMVJ/fK5JZzetE5O3Dj96Zd+/hlayXjeyfQu51ibF8Ln25xMTDLwZPjOjFn2WwWf/MGrtQc+p01kfadu0fsfTQmNTWNovytDQa1zcmwcNq1bB6jbw5uRft4z7CgLzuGUyxN2/um6wP1IqVQY4rp+sb4LjtOHdeav3++iwfP78Zt/ys39bQ9eKbuW517Jy6Xq87j+975c4Ope7fLxdZfVrHvpy/R5YX0z0nhopOO4pgeOZEuu0l8U/ZvX5pKeVkxqeltmPiBPbqn7MGQy47hEmv55WuZqN+LNOCmpyPWMlFdWcGar96lcvtK7hg7gOOP7nbEn/H9rrx3uefk9oZ39lLsbs0n/76rzu9KUWkFr8xbw9o95WQOOoMBp59PQkIo7klrmebkVyTt2bSGjI0fcOfFwQ2+8ovtXHr3f3hqlIOhPbMprnRz+Xtl0Z1hEb7sKJrAbi8j58rHG0zX7512BySb+69h+dx3uaAPfLW+kLLyCr5aV8gFfZJMf9kRwOVyNbrsRHHBAfbt2ETu8nm0UXbOGtCB86/uH9I7EsNl+pwljOudgHJW4nLUgKOScb0T4mbKXkRW/c2zferPgIXTrl+Wsfmrt3jw4sEMvnhMk3/O97uSlWrlmYUFlFc5yGpd1eB3pW16K+77zckAzP5xI28/fyftBo1i4IgLsFiNyejD5ZdRNsx/mzd/Pzzo15k+ZwkjO9eQYXWRX2wnp116XGWYuf/VF6aw+afvWZ5XjqusgOfGJHP73P1Y09qRsc+clx0bo7UbR+EenOXFVJTXYJ//b0b2bMf5t56IxWLcGW5LLFy1idx9Vfx7YSmZKQkUVlaQ3TadLvvjY8pexA+tNT/O+jcD08pa1OC9cNUm9h6oZvrP+ygts/P8mCQmz62kcumGRn9Xxp3cj3En9+PrVVt59fk7aDdgBANGjY/qza9DYevP3zOyZyuSk4K/uWj+sg38srmMT9f58quShARFpzjJMBl8RYBvA+3sOptng28D7dQs861m7++Wf87gm1kv0jfvY04emsW15fls6nix6Qdeqalp7Jr1IMqWjLvKs36MstpISE7DUeVi7verefTq2wyusmU+e3oSz7z9JexZyV0jMnjm2xLofHxchJaILF9+1d08G3z5Fc7Ns4vz9/PjzH9x3/l9ObFfvxa9xmdPezYl8P2+nD80g432Euh8zBF/dvTQXowe2ovvf9nFf6b+id6jr6Ln4OAutzWV/7I5vhXvwbNrhxEr3ldXVbJn0UyeuKvps46Hc/aJx3B254q4za/oOt2PUr47hFrl9KzzYbHaaJvV3hS3DR+Ob4HCCUMzcLlcnNuxjF8XfmDahQrLy0r4+cv3GD6wB6lWaHPcGHpPfp2+d86gz+Q36PW757CltiW/rNroUlvMtzjh+f2SufSN3Yw7OjnuFikUkeF/h2Og/Pq/abPDkmEF+/ey/t2/M/3W4ZzYr1NQr+X7fZlwbCu25B5k4pBWzfp9OXVgN96562x65M1j/ksPU3RwX1D1NMUDU2bRNqs9gye9SPvx99Ltpql0u2kqXa57unbF+0heflwz/13uvGBIUHc4+kh+yeBLNIGv56tdaxsVJYW0tVZzdudyln3+jtGlAZ5m+b3bfuXbGf9i5bSH2fvx41xzVBHTfj+czLYZtO41jASr+fu4msPXxzJng52i8hpmr7fX9ksIEe3y83ax+cN/8NKtZ9A6JfhLfYfrkWwqiyWBW84/nheuHkjeZ/9g6Sev43a7g64tGuTn7SatYC1D+wY3CPaR/JLLjhFjhtuzW2rzT9/z04EqZv28i/LifDJTFIWVFbQvXmTYpUdHTTWbl84n/9dl2JzlDO2Rzm0XHU1Wm7qXErLSklj9zp+xpda9KzMxOQWwR7Di0Fq4ahO78ir9eliKSE9LpVuc9EuIyIpkfh3M3c6O2c/y4q1nkmgLzT9RoeyRzGqTyr9vOYsf1u3imWfvZOgVd5PV6ch3XbaEWVa8//n9f/PSTcE32ftIfslSE2EXDStCN5Wv72vC0Ax+8/Kv9DnnBsbccFdEjq21pnBfLpu+n43Vvo/EmhIuHt6LU/p3OeLaWz2vfoZiUqmpqqzzuMNexLFHZbPsxejs+/rbf2dTuul7nriwM899XxYbPROy1ISpRDq/yooLWP/2o7x421nYrKFd08rX83Xt0Nac8/JuLj5vJA/fOC6o16yucfCXt79jX1JPTrr4Jqy28Myw33b+MKxpWQ0ed5blM3XOirAc02ft4s8ZYfmFy0YMCOnrxmR+yVIT5mHGFaFbwtf39ZcrMnh/+QEyE538NG8Wp19yQ9jW+tJas2nFIgq3rcWZv53+OSn8ZdTRdM85sdmvVVNVSdcb/l3nscqDueR/9c9QlRtR+cV2Ppz/Ay+d4/n82qGtufy95Vw37tToXSNHmE4k88tRU83St//J0xNPCvnAy9dj9N7laUxfUUJGopuZXyzhD5edEdTvS1KijSdvOIM1W/fyf1PvYcCFt9Kx55Eb+ZtLq4QGq90D7JxybciP5S9v2wZSdy7i0utOD+nrSn7J4CsiovmSo4+v7wvgq/UFTL0glYkf2Fn80Rshnf0qOpBH7ua1FK77jmRHERec0J3jRnegWxBb+WSlJbEn/yCVB3PrPG61KKK1Y+PFDxcyuquDIZ1S2FlcTq82qXG1Ro6InEjl17fT/8FTVwykS3abkL4uHOoxApi9rpSXLmjNFR+U88IHC4Ke/QIY3KsTs+7K4e/vvsXiJe044ZJbSG6VGvTr+iSohIAD3gQVvrbt8rIS1v/vJf77h5EhabL3J/kllx3DKppXta/v5Xuvxn5gF/ayMi7r4+CmoUm8tqqaz/KyeODNhS1+Xa0129atomDLT1TkrqdvdjLnDenMkD6dQ7pRdaxdehxy9WNUlBb7bcuRSHrrZDq1z6q9tT4qyWVHU4nU5a5fvnqX01rt5jcjQj9rBHDh3VPYeyCfwrJKxvd287vjk3h1ZTUL8lL5+e2/hPRYG3fu57EPf+aoMybSfdDJIXnNSG+0rbXmq5cf4bkJ/clplx7S14YYzi+57GgO0byqfX23/HNG7TZDvz8v
A4ANxVtITbRgLylq1qVHl9PJlrUr2LtyHik1hYwe0IHjT8ih32Xh3ag6Vi495hfbSU1UdG2fyLQJndFaR/+2HMKUInG560DuNmx7VvKbG0eG7DXr++zpSbXbDD10nmdNsi3Fe0lLSqCgpDykvzf9undgxp3nMOV/81i0cgHDLrmV1unBz+ZFcqPtpe89x+2jOoVl4CX55SFLTYgm819y4v3lB6iuqqJLYmmTlpwoOriPVZ+/xTdT7mHd9Ac4pXIRr99wLP+9/Rwmjj6Wft07ROAdxIbpc5aQbauivMrBm8uLyUq1xt1t2iI2uF0uVn80hUeubH4fZ3P5bzM0fUWJZ5shW1VYfm+UUky+cBiPnteRn6f/hc3L5of8GOGyZeVCBqWWcNrA8NzBKfnlEV3TL1HIYrE0uFYfDavaB+JbcuLtn3b6bTVURsayhQ2WnHC7XOzavI6t331KmquEzm1s3HziURx34RmG1B5LfV/zlm5g897K2lu0p69xYLUkxM22HCJywt1r9PPn07nlzJ6kt04JyesdTku2GQpWry7ZTLvjHN6Yv5Jv/ruE4y6dTFqbzCP/YD2pqWkU5W9t8HdhsYT2xoTNS+fTLm8xd1wWvlX8Jb88ZPAVRsUH92H/9KkGjyvtNv2q9oHc8s8ZwKElJ044ti0X7drEzt6eX1R7SRHbVnxD/sblZNhcnNwzgz/fMPCIS0FEwrIXbyPl3IfJn/10g++5ysy5Un9jTh3ci4N5uZzcrxOT7PbYuEVbmM4Tkybg1m4KZj9T53FLcisy2jXsA2uufVvXk1W+idFDTgv6tZqi/jZD5wxO49Kde0jvF/4T4RvOHsSZ+wp5fNZfSeo7kv6nj2vWPpEPTJnFQ9ePo/irlwL2fT0xaULQ/6ZsWDKXrgVLuTOMAy+Q/PKRwVcYGXV7cDj5LzlhLy7ghA5OZn7yBpaS3RzVzsYVJxzF8LNONeVG1W5loUe9ni+Abc9fH/liWii/2M6seUvISXTz5vJirh2WEXe3aIvICHfP6trPX2fm5BFBv05z+C85UVBi59J+8Psvf+APlwa35ERTdM/J5JVJ5/Dt2p1MfeFe+px1Nd0HDGvWnYTh6Ptyu1z8svATjq5Zz20Xn9Di12kKya9DzPcvZAzxTdnX/wjn7cHh9sNnM+iXWs607/byt7l7ya+ES3q7GNCmgn/eMJLTBnYz5cALICFBUXkwt8FHQkL03GD3wgcLSdLVvHRBa2avK0UpFZf9EiIyfG0T/h8Oe2HQG2lvXf0jZ/RJwxri9byOxNf3lZGcQElZOUM6JTK6q4MXPlwQkeMrpRg5uAcz/ngmXXPn8vXLf6G44ECTfjY1NQ2HvbDB30cwlx6djhoWvPJnzsnYxW0XDG3x6zSV5NchMvMVJr4p+/osFktIpuwjRWtN0cE8dqxcSNnu9axYMB+ru5plLicWt4vV+1zklrjIObCKv950odHlHlbHzDQSLAqnq+4KAG635sRbp5p+uYn8YjtTPljAJf0ULu3m2A5wwvO5ZKalxF2/hAi/koJ8klyuBo8nqISgLnFVV1Wy9esZ/P2ec4Mpr0V8fV8v/VgCzhpa2fDkV+4KHv5t8Ot9NVWizcrvxx3PJUVl/PPDf7BGt2XA2N/SrkPjeyc+MGUWky5ouHSFy+WipCC/2TXkbdvAL59O5YmJJ9Crc2T+TXrvq+W4HC4KKpxxn18y+AqTaF7ZXmtN/p4dbPp+Du787fTrkMQ9p/Wl8znDSP7D8Npbtn2rRf9vbTH7ampCfst2ODhdmpTsLnUes6W2Jb+s2qCKmu7FDxfSNVXTOTON/ke156FsJ6vj8BZtERlu7W6QX77Hg7Hmy1k8dPFgEhIiP0Puv+SEGfKrfds0/nXTmRSUlPPPD5/mF0cbBoy5lqxO3QM+PxR/J1UVdpZ+8CL9WxUx665zI3alIr/YjsvlICfVwo8HUnjovJy4zi9zXh8SEae1Zv2PX/Ptf//K0hf+iG3Ff3lsdAbT/3gmD115Kr26ZJOc5Fn0NNBq0Um6mhc+iMzUfTzybcfx7NgUFmwpp6DcFbe3aIvoVV1VSdWOVQzp09mwGsyYX+0yWvOP357J8xOOpmrRC3z98sNsXrU4pMewlxSxZNa/Wfvmgzx6TjYPXhnZ3ly55FhXSGa+lFLnAc8CFuA1rfWT9b6fBEwHjgcKgCu01jtCcWwzM/u2QgX79rB16ZdU5K4jp7XirEEdGXXtYFqnHP4uHN/U/ZQlxYzvDS7t5oweCXz4zcqQbNURLllpSax+58/YUusuCJuYnALYjSmqiabPWcLZ3V1kpljrTNcDcTllH2qSYQ0p7W6QX77HW+qn2W9y/8VDgikraGbOr7bprXj0mhGU2Cv56qcfmTv1faqTMskaNJKjh41o9t+J0+ng12WLKFi3iAx3KY9eOoxuOX3D/TYC+mjBSs7okSAtE15BD76UUhZgKnA2kAssV0p9prVe7/e0G4EirXVvpdSVwD+AK4I9tpkVH9wXcFsOI5eZ0Fqzec1y9q36EmvFQXp3SOXu4b04+tLmrb1Vf7XorFQr92Y6+PylXDbvPkCfru3D9A6Cs+zF20gZ+yhZ4+5u8L289x42oKKmyS+28+k3y3hqVBJ9u2XzULY7rqfrQ00yLHKq9m2k/1FnGVpDNORXRmoKl57en0tP709ldQ0LflrJV2/NxV1TibVDL2xZXUlI8vzuK6UoWvAGv676AQCt3eSvXYS7poJ0yrn4hK6MuPH4kG7X1lwbd+6nssLOfed0ISfdJi0ThGbm60Rgi9Z6G4BS6h1gPOAfXOOBv3o//wCYopRS2swbSwbJLMtMFBccYNuKb7DnbiSpupBzB3fk3IlHk976uKBe13+1aADlrGRcL7j3+ff5+J/mbVx3uxv2fPkeN6vpc5YwsnMNGVYX+cV2ctqlx90mtGEmGRZAqDNsww/zuXiocZcb/UVTfqUkJTL25GMYe/IxTP1gAZ3G3Epl7gZwe2a7XDWVaGcNF9qW1v7MsVf0oU1aK6NKbuD+qR8wrhfgqARsdVom4jXDQjH46gzs9vs6F6i/Slvtc7TWTqVUCdAOaP4tGlHCiF3owTO7tWvzOnYt/Rxn6QF6tbNx47BuDDt/WEiP45u6n7n2AG635mBRKZkpCRRWbTd1432CdrH7jTsDPm5W85dt4JfNZXy6LoHCygqy21aSkKDidro+DCTDAghlhrldLnJ/+IRL7jkvFKUFLZrzK3faXQ0et9lsjDyujwEVHVl+sZ0V67azLVnz3vr9tfkF8d0yEYrBV6BFkuqfDTblOZ4nKnUzcDPA1Xc/zogLJwRXnUEy2mXRuUfDX4bqMCwzYS8pYttPizi4eTWtnSWc0rst91x2NG3SBoT8WD7+O8/7VoyefEoaD/5vDy98uCCit203R8fstgz4XcONtNe92vBSpBnkF9vZX1jGzadk8fBZbXnm25K4XRE6jEKWYbGSXxDaDNu3cyMjjmlvyB2OgQTKr7tGZPDUwiLOnvxv5j9/pykHYNGWX+BptM9upZj/++68uTJ+V7SvLxS/CblAV7+
[... base64-encoded PNG image data elided: tail of a notebook cell's "image/png" output (binary payload, no textual content), followed by the cell's empty "text/plain" entry ...]
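The hunk below strips two scikit-learn FutureWarnings that had been captured in the cell's stderr output. As a minimal sketch of the corresponding fix, assuming the cell constructs a `LogisticRegression` and an `SVC` (the two estimators the warning messages point at; the cell's actual code lies outside this hunk), the warnings go away once the arguments are passed explicitly:

```python
# Minimal sketch, not the notebook's exact cell: pass the arguments that the
# two FutureWarnings in the hunk below ask for, so stderr stays clean when
# the cell is re-run. Parameter values are illustrative; 'lbfgs' and 'auto'
# are the values the warning messages themselves mention.
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC

clf1 = LogisticRegression(solver='lbfgs')  # silences the solver warning
clf2 = SVC(gamma='auto')                   # silences the gamma warning
```

With the arguments pinned, re-running the notebook regenerates the PNG output without the stderr block, which is exactly what the hunk records.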
    " ] @@ -460,19 +428,9 @@ "execution_count": 12, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n", - " FutureWarning)\n", - "/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n", - " \"avoid this warning.\", FutureWarning)\n" - ] - }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAl8AAAHiCAYAAADWA6krAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzsnXl8HVXZx7/PzNybpM2e2ybpRsuisrkA4o7gAgIVN7QtZSkIaKUspa/1ZRUUKFSpQIsI9GUpQls2EQuIKKgoKigKiMjWLXt6sydNcu/MnPePM3dyb5K2aZut6fl+Pre9d+bMzJmbzC/Pec5znkeUUhgMBoPBYDAYhgdrpDtgMBgMBoPBsDdhjC+DwWAwGAyGYcQYXwaDwWAwGAzDiDG+DAaDwWAwGIYRY3wZDAaDwWAwDCPG+DIYDAaDwWAYRozxZUBE5orIb3bx2NdF5OhB7tKoR0SeEpEzRrofBoNh8BCRo0WkcqT7YRj7GONrD0NENorI5wbznEqp+5VSxw7g2veIyDW9jj1YKfX7nbmeiEwXESUi7cFro4j87052e0RRSh2vlLp3pPthMIx1An3oDLSiNtCh3JHu1+4SaGBHmg42D/P1jaE5ghjjyzCSFCqlcoGTgStE5PODfQERcQb7nAaDYdj5YqAVHwQ+BFwywv0ZLD6glMoNXoU7e7DRtz0XY3yNIUTkHBF5R0QaReRxEZmUtu9YEXlTRFpE5Kci8gcROTvYN09E/hS8FxH5iYjUB21fFZFDRORcYC6wOBil/SpoH3riRMQWkUtF5F0RaRORf4jI1B31Wyn1d+B1tLCm+jtJRB4RkS0iskFELkjblyMi94pIk4i8ISKL00dwQZ++JyKvAh0i4uzgfEeKyN9FpFVE6kRkWbA9W0R+LiINItIsIi+JSGmw7/dp358lIpeLyKbge1slIgXBvpSX7wwR2SwicRG5bKd/uAaDAaVULfA0mVpxooj8M3h+K0TkqrR9233+Ai25J9CS/wAfTr+eiBwYPOvNokMsTkrbd0+gpU8FmvhnESkTkZuC8/1XRD60K/e5Ay1XInKeiLwNvB1se5+IPBO0f1NEvpHW/gQR+U+gyVUi8j8iMh54CpgkPZ63SX06Yhg6lFLmtQe9gI3A5/rZ/hkgDhwGZAHLgT8G+2JAK/BVwAEuBJLA2cH+ecCfgvfHAf8ACgEBDgTKg333ANdsqz/Ad4HXgPcGx34AKOmnr9MBBTjB548CW4GvBJ+toA9XAlFgX2A9cFyw/3rgD0ARMAV4Fajs1ad/AVOBnAGc7y/AacH7XOCjwftvAb8CxgE2cDiQH+z7fdr3dxbwTnDeXOBR4L5e93pn0JcPAN3AgSP9u2Re5rUnvHppzJRAY25O2380cGjwnL8fqAO+HOzb7vMXaMnzQHGgF/9OaQkQCZ7rSwPd+AzQBrw32H8PWnMPB7KBZ4ENwOmBXlwDPLed+1LA/v1s36aWpx33TNDnHGA8UAGcidb3w4LjDw7a1wCfCt4XAYelfW+VA/kZmNfgv4zna+wwF7hLKfWyUqob7Zb/mIhMB04AXldKPaqUcoFbgNptnCcJ5AHvA0Qp9YZSqmaAfTgbuFwp9abSvKKUathO+7iIdKKNn58CjwXbPwxMUEr9QCmVUEqtR4vn7GD/N4DrlFJNSqnK4H56c4tSqkIp1TmA8yWB/UUkppRqV0r9NW17CVogPaXUP5RSrf1cay6wTCm1XinVjv7uZ0vmlMDVSqlOpdQrwCvoPwIGg2FgPCYibWgjox74fmqHUur3SqnXlFK+UupVYDXw6V7Hb+v5+wZwrVKqUSlVQaaWfBQ9mLo+0I1ngXXAnLQ2vwh0oQv4BdCllFqllPKAtegp0u3xcuBVaxaR1LW3p+UplgR97gRmAhuVUncrpVyl1MvAI+hwDtA6dpCI5Aea+fIO+mQYBozxNXaYBGxKfQiMgAZgcrCvIm2fAvoNtAwEZgVwK1AnIneISP4A+zAVeHcn+hxDi9v/oEdhkWD7Pmh3eEqUmtGjz9Jgf8b99Hrf37Ydne+bwHuA/wZTizOD7fehpzjWiEi1iCwVkQh9yfjug/dO2vkh09jdGty3wWAYGF9WSuWhdeJ9aO0AQEQ+IiLPBSEFLcC30/cHbOv5660l6c/xJKBCKeX32j857XNd2vvOfj7v6Dk/TClVGLxSoRDb0/IUvfXtI730bS5QFuz/GnoAvkl0uMnHdtAnwzBgjK+xQzX6IQQgmNMvAarQbucpafsk/XNvlFK3KKUOBw5GGyXfTe3aQR8qgP12ptOBR+lGoAv4Ttp5NqSJUqFSKk8pdUKwP+N+0EZfn1P36tc2z6eUelspNQeYCNwAPCwi45VSSaXU1Uqpg4CPo0eYp/dzrYzvHpgGuGQKscFg2E2UUn9AT/f9OG3zA8DjwFSlVAHwM3TYw0CoIVM/pqW9rwamiojVa3/VTnZ7Z9melqforW9/6KVvuUqp+QBKqZeUUl9C69tjwIP9nMMwzBjja88kEgSDp14OWoDOFJEPikgWcB3wN6XURuAJ4FAR+XLQ9jx6RkUZiMiHg5FkBOhAG0VesLsOHde0LVYCPxSRA0TzfhEpGeA9XY8O5s8GXgRaRQfN54gO5D9ERFLBsA8Cl4hIkYhMBhbs4NzbPZ+InCoiE4IRbmq5tycix4jIoSJio2PmkmnfRTqrgYUiMkP0EvjrgLXBFK/BYBhcbgI+LyKpoPs8oFEp1SUiRwKn7MS50rVkCnB+2r6/oTVwsYhEROcz/CKwZrfvYPtsT8v7Yx3wHhE5
LehnJNDxA0UkKjqPY4FSKonWsXQ9L5FgcZBheDHG157Jk2iXdup1lVLqd8AV6Ln+GrQHajaAUioOfB1YinZfHwT8HR142pt8dDxUE9r13UDPKPP/0LEDzSLyWD/HLkOL2W/QD/n/oQNCB8ITwTXPCeIlvohe0bQBHTy6EkiJxA/Q06YbgN8CD2/jXgDtXdvB+b4AvC4i7cDNwOwghqMsOHcr8AY6yP/n/VziLvQU5R+D83eRKeIGg2GQUEptAVah9Q60x/wHQUzYlfR4dgbC1Wid24DWrfvSrpMATgKOR2vGT4HTlVL/3d172B7b0/JttG8Djg3aVKOnWG9AB+sDnAZsFJFW9JTsqcFx/0UPHNcHmm5WOw4josN/DHsTgRu9EpirlHpupPuzu4jIfLTB1DvI1mAwGAyGUYfxfO0liMhxIlIYuLEvRcdE/HUHh41KRKRcRD4hOr/We4FF6JVGBoPBYDCMekx23L2Hj6FjCaLAf9CrhzpHtku7TBS4HZiBjtFag54SMBgMBoNh1GOmHQ0Gg8FgMBiGETPtaDAYDAaDwTCMGOPLYDAYDAaDYRgZ3TFfLyw3c6KjiB8//Fe6Dj+L0qkzRrorhjHKOUftO9DkmKOeO/+43uiXwbAXcfCkfD6+f2xAGmY8X4YB88n3lVL11r9GuhsGg8FgMOzR7LbxJSJTg7pab4jI6yJyYT9tRERuEZF3RORVETlsd69rGH4+esgMGt94YaS7YTAMKkbDDAbDcDMY044usEgp9bKI5AH/EJFnlFL/SWtzPHBA8PoIcFvwv2EPo3y8QimFLg9pMIwJjIYZDIZhZbc9X0qpGqXUy8H7NnQZlsm9mn0JWKU0fwUKRaR8d69tGH4+9d6JvPPPP490NwyGQcNomMFgGG4GNeBeRKYDH0IXJE1nMrryeorKYFtNP+c4FzgX4PbFszj3S5/I2O8jdNjFeE42Ay9cP5wobLeL8V4j1hgsGn/Ee8p58tl/csBhnxzprhgMg87uali6fp266BqOOmlO5vlRFER8sm1GpfdYKUWXBy1JCzUq9dVgGBsMmvElIrnoQqAXKaVae+/u55B+LROl1B3AHUC/qx077GIiuYXkisco1C6Ugm6VTUc75HkNI92dQWefsmKS9S+bqUfDmGMwNCxdv/pb7VgQ8Skcn40vDqNVwLKVCx1dNCftke6NwTBmGZTVjiISQYvW/UqpR/tpUglMTfs8BV19fafxnGyyRqnhBVpPs8QLPHNjkymFEbo7O0a6GwbDoDFcGpZtM3oNLwARfHHINnaXwTCkDMZqRwH+D3hDKbVsG80eB04PVgx9FGhRSvWZchzgFUetbqXQ/RvlndwNZn1if/79+1+OdDcMhkFhODVMREav4ZVCxHi1DYYhZjA8X58ATgM+IyL/Cl4niMi3ReTbQZsngfXAO8CdwHcG4bojxq+f/wfvPWE++x93Ltff+fBId2fYOXS/chJVr490NwyGwWKv07C//+lZvvnFT3LmCR9j7crlI90dg2GvY7djvpRSf2IHbh6lq3eft7vXGg14nsd519zOMyt/wJTSEj48axEnHXMkB+0/baS7NmxYlkUOnSS6u4hmjd3pVcPewd6oYbdeeynX3bGWWFk5F8w+no8ecyz77Pfeke6awbDXMLrLC+0mR556GfGWzj7bYwU5vPjza3fpnC++9jb7Tytn36llAMw+/lP88tm/7VXGF8ChUwuprXiXKfsfPNJdMRjGJBee/hVaWnvH/UNBfj43r/rFLp/3zdf+Sfm06ZRP3QeATx//Jf7y3NPG+DIYhpExbXzFWzo5+Fs/6bP99dsX7vI5q+oamFoWCz9PKYvxt1ff3OXz7anMPuogFjz4K2N8GQxDREtrKwecu6LP9rfvWLBb522or2VCWU8as1hpOW+++s/dOqfBYNg5TG3HnUTPPmQiYzi4flsU5Y8jn/aR7obBYNhJ+tUwE2BvMAwrxvjaSaaUxaiojYefK2vjTJpYPII9GjlKs12aG+pHuhsGg2EniJWWs6W2Kvwcr6uheGLpCPbIYNj7MMbXTvLhQw7g7U3VbKisJZFIsuap5znpmL2zxNsnDpxE9X9eHOluGAyGneC9h3yQ6k0bqK3cTDKZ4A9P/ZKPHn3cSHfLYNirGNMxX0OB49isuOxbHHfOVXi+z1lf+RwHH7B3Bdun+MwHZ7ByxfMc9KmZI90Vg8EwQGzH4TuXXsdl356D73kc+5XZTN/fBNsbDMPJmDa+YgU5/QbXxwpyduu8J3z6CE749BG7dY6xgG1bFGcpfM/Dsk1KbINhMCnIz+83uL4gP3+3z33kUZ/lyKM+u9vnMRgMu8aYNr52NZ2EYeB8fL9CXn/jX+x3yOEj3RWDYUyxO+kkDAbD6MbEfBl2i68fdRBVf396pLthMBgMBsMegzG+DLtFdlYEv23LSHfDYDAYDIY9BmN8GXabSQUROlqbR7obBoPBYDDsERjjy7DbfO3Iabz5t9+NdDcMBoPBYNgjMMaXYbf56CEz6Kz890h3w2AwGAyGPQJjfO0CZ112MxM/eRqHnLR7NdbGEk5nw0h3wWAwDIBlVyxk1qcP4VtfOXqku2Iw7LUMivElIneJSL2I9Ov+EJGjRaRFRP4VvK4cjOuOFPO+8ll+fcdVI92NUcXBU4uo2bj3FRg37Pnsbfr1+S99g2tue2Cku2Ew7NUMlufrHuALO2jzvFLqg8HrB4N03QERb2rlawt+QENz66Cc76gjDqG4IHdQzjVW+NrH9mfj30zKCcMeyT2MYv1qaWrg2gtOpbW5cVDOd+gRHyOvoGhQzmUwGHaNQTG+lFJ/BAZHGYaAVY8+TVPVO9z7iDEOhoppZcW4zdUj3Q2DYacZ7fr17GP341e/wu9+8fOR7orBYBgkhjPm62Mi8oqIPCUiBw/XReNNrax75jlu+2op6555btC8X4a+TIgm6OxoG+luGAxDwYjoV0tTA/985mFu+uoU/vnMw4Pm/TIYDCPLcBlfLwP7KKU+ACwHHttWQxE5V0T+LiJ/v+OXf97tC6969Glm7ie8tzSbmfuJ8X4NIWcc8z7e/Iv5fg1jjl3Srz8+vnq3L/zsY/fzxf3hgNIcvrg/xvtlMIwRhsX4Ukq1KqXag/dPAhERiW2j7R1KqSOUUkec+6VP7NZ1U16v0w/XhWhPPzzfeL+GkH0nxWjd+NpId8NgGFR2Vb+OOmnObl035fU65fACAE45vMB4vwyGMcKwGF8iUiYiErw/MrjukOcmSHm9Yrm6fngs1xkU79ec//kRH5uzmDc3VjHlmDP5v0d+Mxjd3ePJHZeFk2jD97yR7orBMGiMlH6lvF4luRFA/z8Y3q8li+ez8NSZVG58l1M/exi/ftSsfDQYhhtnME4iIquBo4GYiFQC3wciAEqpnwEnA/NFxAU6gdlKKTUY194ev3/xFaprunngtZqM7ZPir3DxN7++y+dd/ePv7m7Xxiwf2beQ6vX/YcoBh450VwyGATFa9eu1F5/n+ZouVr9ambG9cMvzfOXMC3b5vJcsvW13u2YwGHYTGQYN2XVeWN6
nc81ZUygcNyg245DSvNWlsLtyxw3HGPVNbVz6TAsf+eq3Rrorhj2Qc47aV0a6D4PFnX9c30e/ynN8IjmjP01NsrOdmk6Tg9tg2BkOnpTPx/ePDUjDzNNlGFRiBeNJ1r870t0wGAwGg2HUYowvw6BiWRZFWT5b203KCYPBYDAY+mMPNL4Uo3mmFAj6N8o7OYR88YjprH/59yPdDYNh1KGUYk8QsFEdjmIwjAH2OOPLdrvoVvao1S+loFvZ2G7XSHdlxPjUIVOpe/Plke6GwTDq6PLAUu7oNcCUwlIuXWbBssEwpIz+yPVejPca6WiHLicbGI2xuQrbbWO8t/fm4olGHHLdZjzXxXb2uF8xg2HIaEla0NFFtg1B9opRhVKKLi/op8FgGDL2uL+MFoo8rwHMyGxUc9T7JvDupneYst/7RrorBsOoQSE0J21IjnRPDAbDSGKGN4Yh4VOHTKHi5d+NdDcMBoPBYBh1GOPLMCTMKC+hvXbjSHfDYDAYDIZRhzG+DEPG9EKhzdShMxgMBoMhgz0u5suw5/D5Q8p46t03OPDwnS+QvmTBHNr7yRWWm5vHJStWD/pxBoPBMFjsjg4ZDds7MMaXYcj4xKEzWLXq2QEbX+mi0xSvJ5JbDICdPY6Dz74RgPUrz9/uOdrb29j37OV9tu/oOIPBYNhdUhqWrl/Qo2ED0SGjYXsHxvgyDBnjsqOwtf9pxyUL5lC1aQO+8sNtvu8RKSij7KSLcbq7ECsCQN3ay3l1xXwA3Lb40HfcYDAYtkPKyGppiGdqWDLBpNOWMiGZROxIuD2lYUa/DCmM8WXYKY6cfyvxtu4+22N5Wbx423l9th8xvYCrz/4SCVfnBmneUosSC9/3sMcV4nc0I5YFYmGPK8Rtraf6kesQJ4vy07W3y84tYtK8mwDYtOL08Nz9ueeb4vW8vnJR6CkzGAyGdHZ2Wi+9fbp+OeOLUFnjtYbZDvb4QvzWOPW/uhHluVg5+ZTNuQ7o0bAd6VfqGoaxjzG+DDtFvK2bg8/pa9i8fueifg2z7u4EdQ3NHHHl4wC8umI+k+bdRGf9ZuyCidQ98D3Kz7iJRHwzTkEZbkstkcIyalZdjJ9MAArluWyt3wSAl+jmOzOPxBILX/kZU5Mlx52H091NfN2N/OvWBfhJXWVARPA7W7ls3kwTN2Ew7OVsb1pvWwO67NgUDj77xgz9cgpLAai9fzElJ15MNDaNRP0GohNn4Ce7qb1/MX5S62FKw3zP5bwTj0AsJ0O/AHzLZsIJF+I9cEkf/bJth2R7I0sWzDH6NUYYFONLRO4CZgL1SqlD+tkvwM3ACcBWYJ5SytSf2UNIN6qq4q34G+oAcGyh8elbaYvXoHyPzfVgjyvE62xD+S5ObhGOlY3vuby6Yj529rgBXa+nrlxmCRYRC7Ed9lmwiq217+o+FE0GoPa+RSS6uwBBbAflu0w6a4U+i5vAb61j8vQDeHnJ17ls3sw+1zRG2d6L0a+xT+940qqNbwOw5cmbcVvjQT1Lj4b6GkTsQL+KsW0Hyc6jc0slr69cNKBrKTfR73YRCxCcvBiFX7gAlMIpnhzur71vEZ6vNa+3fkWiWXTHN1O5+lKjX2OEwfJ83QOsAFZtY//xwAHB6yPAbcH/hj2AlLfrz3d+H0+1UvurZQC47U0o36P0G1cTlnqyHfA94utupOTERaAU8vI68o/4MvUPXg4IyUQ3KjCslFKotJiJdMSJ6v8th0jxFGrv/y4AnfWbtV2WKs8i2uByiqbgNlbitTcBkIxX6Gv4LtGsLP1eLMYdtxDPyyyRULHm8gGNKs1KpDHJPRj9GtOkvF2vr1yEUlD/K+29d1vjlH7jB5mNRQL9ulh/VgpEqH/o+6TrVzhI7FWnM/VRkAwNU8pH+S6JtkZ6DywRAcsmUjIFFHjtTRn65TsOtm0b/RpDDIrxpZT6o4hM306TLwGrlP5t/auIFIpIuVKqZjCubxg6jpx/K1XxVuqXL6arrQkrJ1/bPdFxTJx1MQ1PLIMgsNQpKKX+4avxO1vxO1tpePJmLRzJLrpr3sIeX4TX0RQKUohKe+O5uI1VeB3N1N67EAC3o5Ha+y7GbY0jlqUNt8DIEzuC8tJqtdgO0Tztys+ZOA2A7vhmyqbuGzbxPI+s2LSMLkRyi/sVpd6YlUhjD6NfY5slC+bQFK/n1RXzSbQ1YuXkAVrD7PGFRCZMJ9lYCUDjb36K39Ue6hdo48fKzu1HvwSkR778ZDcohdtYCSJ4HY1aw0RrWPzxpYjlAAosrV+hpz5Nw8SyieYVZ+jX5OkHAFCF0a+xwnDFfE0GKtI+VwbbjHiNcl7bFKfk5KsRyybf93CKJiF2hNpVC/ttrxJbKTvtRtyWOiIlU/WUX2c7tasWonKLdBvlI2LhNlWD72nh8T3cljrEier9lk1K1gQoOfFiGp68Ga9tS8+1lE9yywZAcFvjVN9xtt7u+4Cic8tmPTr1vXCawfc9PHfnCutta8rCtu0Mo84wZjH6tYeyZMEcKja+y4STr9LeJ9/VGuZEqF11cZ/2fqIzQ79AT/vV3r84bNOjX1XBQV4f/UrNBCiU9oABsZmLaPj1ih4NUz5uY0XY1mtroOqn81C+R3dLvdGvMc5wGV/SzzbVzzZE5FzgXIDbF8/i3C/tfIJOw+Dh+4po8STEiZKIV4AIykuifA88Ny0+K6Cfn6qdW4RSHspzUW6Cqp/O0zvEQiW7qL7jHL3acXwhXkczynP7nEjEQrnd2OOLicamoZQi2aD/HkaKp2DnlVA2dykAbmMVdQ9eqc9j2WDZKCyyY1MQsQHBTXSj6Ikvc90kDXVbmH/84YhtY4lFQUmM3Ny8jNHiqyvmh6PO7vjmwfiKDaOfXdKvUxddw1EnzRnKfhl2QHt7G5HcYiLFkwMN26w1zE0GOrMT+F4f/UL5KN+j/qGrsHOLQv0SywoGgfS0tWytYbnFREqm9uhXyVRQKtSw/vTL90EsK9QvIEPDUvr17RMORxShhk3eZ4bRr1HKcBlflcDUtM9TgOr+Giql7gDuAOCF5f0KnGHH7GxKiNQxL79Vpaf0ApSCREOVjquybCwnolchWjbYTjCmSyFhLFdvxHJA+TiFZUw98+Zw++aV5xGbuYhxZXoEVnH3hZSceDFOQWnYpvb+xWFcmGSNo+aeiwDwOnQOMWt8EVbWOMSJoNyk7pdlE193oxaujmaU7yGWrY1GFInGKn1zwb36na3gu+BkYY0vwm2uo6Guhoa6GkDRuGSW/j62EUxrGNPskn7d+cf1Rr92kV2JTUod01BT0aNhwU8g2Rh4qSwby47gu4nQExbGjg6A/vSr9MvfAyArNq1f/QL0ADMw9iQ6jtp7F4b6ZY8vRqG0hgUhHL31C1SaMaforN+M8pN6+hKtX6HBZ9koBDfRyaZ33wKg4dqTtf4pj6r7L2Hy3CUDvmfD0DBcxtfjwAIRWYMOVG0x8RJDy/ZSQmzvGJwoU8+7N9xWfc9CHRMR36R1rJenS7LGUb/2ckAbQX5Hk55OTHmmfA8sS4tcMoHX3UHF3Rfq9tFtrH4UwW
2s6hHFwMsGUDr7WkQEpRQ1d5+PWA6lc28A0IZXgJ1bROzERTiFpdTev5jYiYvImTiNjctPo/6hq3R739c5xoLbsvNixE5aTDQ2jeq7FjDprBV6arOhkmgwWqxYfuo2vz/DmMXo1zCzK7FJqWMal8xiSqBhSvnU3LuQ6ITpJLZsDLYFi31QSCQrwxACMvULwPfwO5qw8ybgtW0J9QvA62jq0w8RK1O/IMMLVjbnOlCKmnsXolCUnfGTcFCnvCRIX/0qP+MmkvEK6tZetk39Kp19DQ1PLAsXCjSsW0bZGXpxVErDEvUbaHiqx3g0jByDlWpiNXA0EBORSuD7QARAKfUz4En0Mu130Eu1zxyM6xqGhprVl6ESWwHtXUo2Vuqn2/e0gCkdn1C3+tLgCJ2LS7U1BB8DA81ztcB5KpxidIrKdZNkl54CAOLrbkRsJzTGnPxSne+rZEqYXkKPZFVPX3quHL4PPV/ovDrKd6ldfQlua5y6tZfp7b6PPb4QiWRRcux54VLv2vsW0Xt2SSk/HDnrnGP6ihtXnEE0t4hkeyP1Qb4xUT7nnXgESrQYpqYtwawkGu0Y/RpbKDdBzb0XBR/A72gi0VARalgyvgmUwkvpFdroERHtSUrTL30K7XVSbgI7fyKx4y8k/tTNqKRObVP/y6UopRDbwW2NEymZQqJuPdGJM8L4MLEssB38zrZQ95QfTCsG1xMnlRFfQv1KNteSbKql8tbTg2O0fun2UUrnaA9W7X2LglQWvb+Mnrd+MqEXBLTG2bjiDARl9GsEGazVjtsNbAhWCfU/12UYNv585/fpiLey76nLMrb3nopUia2Un/ETAGpXX0bDumV61WJnKwBWTj6RkimUHPsdIjE9G1N9z0Xge3gdzcSfujmcjlQo/I4mnPyJ+B1NKLcbf2sLdm5xmohkUfKFBdQ9eAUAblMlWJHQFJKo9q4p3wc32ZMbx7Izpj1FrIxJT6doMngepbN+SCS2D4iQbKggUjI1WIWkl4KnRp06yJ9witNtrNbvPbcnuBbBT3aRaGvUx0WilM2+BoD6Xy5l2jd1bp70FUpmJdHoxujXnsHrKxfRFa/vk+eqj3Fg2ZSfoStiKKWoW3MpDU8sC1ZL29jjC1G+h1NYSvlpenag+p4LKT/9J1QuP6WPfoloY8jOycNPbCX+xI147Y2hhomTRclx+tejbu0VJOpiTSpnAAAgAElEQVQ3BKko/FCdlO9T/+CV+rqpKcmUfgUest7GU2phgNgOU85bhfJ9ko2VREqmIiJU37UAK5IVJnJ1iidnGFve1qZwFWemhqUGp57RrxHEZLjfi0h0dVI++xosW3C9nqf0lTWXs++py6hpDGIsFOFDXDr7WtzGKpSboP6hKwGInXgx2E6/AatOfozSU27Ajujl2MpNULNqEbEvLKDu4asp+dy3sXLyiJRMQblJLBHqH/ge0/Y/EHei9orZTgQrv1QHqaLd9DX3XoTXFqfuoSvJyp8AgN/egPJcqm7/ZoboiGVj58UIxqwD/4LCtGEpV74frthMLS+380rA9yg/fRm19y3Ctp0wgLXP4gODwTBoeF1bKZt9DbZtZ+S5qlhzOZfNm0lLQ0/dxPRnseTY81DKp271pSjlUXJCMG3Y+3EV7U0qm/sjrDT9ikSz2HDzXLzOVspPD6bxmmtDDYuv/l+m7X8gAFtsm2n7H8jmd97IMKac/BhTz7yZ9T+ZRXz1/wIp/fKoXHFar44onIKy4PrJPvezLUQk03mvVBjMr7yk1jClEMti4qxraPjVj4x+jSDG+NoLcT1FzoQp4edIbhEHn7OUqiXn9GkraCPEKZ6MPU57qpziSYCOywqn43wPiWTjtdbjNlXjpbxIvgcCEccma3we3e/+jXGHfF4LkwhONCvjerm5eVSsuRzJztPTjQFWdBx2/gSS7Y28f8Ft4faqjW+TFZtG9T0X8f4Ft/H6ykV4XVvpbq0jvvp/dbCqUhmiJOkBtkpR/9D38TtbUb5P/HG9YtLvbAXPpW7NZfjdHeExfhDjUf/QVdi2eXwMhuGmd56rSG4x+569nH9cP4vaivV92qcGUSlPe6oqhtavbkD0dGRDJSC4TVVBPq6eBKciguNEiAR65SKhhqVjicX6lefTUF+Dkxfr2R6EVIjtZOgXaA1rWLcs3P76ykV0bqmk9r5FOKmpyDQNS9evlJfe62ii8tYzUL4XapjyfWp//t2e8A/RScmU79Pw1C1Gv0YY8+2PQY6cfys1jW19jCk3mcDp5fVKx1IeyaSLaouTqN/Qa6+OadCxDUGSwV7CUz7nOmrvX6xzgpVq465zSyUR2w6aW1g7eOAvWbGay+bN7DeLc+2ay7H6iWvoileSaGvk1RXzM7ZP/NJ3qf/lj4Kl5S5ua50Onk1263gLpUjGN+F3tTHx5Ksy7qnh1yuoe/DKYJT4Q1LKJ2KhlE/DE8uwd2KVlMFgGBhLFsyhpSHOP66flbHdTyb6eL3SEeVTdf8lgCLZS790aIFCLCdcUdijX1oPo4Vl2LlFiOX0SXBaa/UTT9UPBSUxrr1nHQu++FEmfjFzcVN3fHOv1eE9JNqb+uhXbOYiollZVNy3GOVl6leK5JZNQcyYMPHk72fcU8Ovb8VtrqV0zrVkjD49l/gTyyCMMTOMBMb4GoPE27r53Pfu6LP9N0vO4cBpE3ktqM3Ym/IJRVTFW5HxxWSVzgi3J+I9iQABxMoUkNQUI621YSxB5xYda+DYQjQ7h5o1ekVk1PfY8vjScJUOIniB8LhtetogNzeP9qd/0qd/U6fv1+/yc+X72LlFTJp3U7ht88rzQpEWy9HTCEohdgS3pQ6/s5X4E8sQ28HraAHRI2Iroke2Esnqcx0IAvx9D6+9ES8YdaZWP/mdreF7sRwmn7ei33MYDIZt097exoe+1zfA+x/Xz6Js6r5hktDeFE7QU3WNW+qJlvYkD03ENwcGVzCAyjCkJJxi9FrrwkD3VA4sOzVwVD7J9ka645vZ8sTN+MmuMEbU62gOa9fmBGedvM8M2n97S58+btPbpFSGflXcfSGR4sl4rXWI2H30C7TeNDx1M25bXN+ZCJHYPohlUbv6UpSbMtIy3P4ggt/ZSgJBud1Gv0YIY3ztpbRvqcYPlj93tTXx3PLFdDbFtXc7Wcvm5ZlxCPb4Apxx+fi+XiINaGNJaTFTbgLrt0uxOptoevQqyot1CQ8fKAT2nzGBeFs33aUzaH39D0w65/bw3DoQVGh4RNdY297KmiUL5mQEgLY0xHG9JJGCsoykgVY0m9o1lyPKp271JSgBFOHUg503gZIvnIftRKi+/xLEyhwFqsRWSmf9kPiTNxEpnhqOKJMNFWDZ2LnFuoakCLET9QhXLxPX7eofvDLsZ25u3g5/HgaDYefoileifD/0eifbG/GTCZTy+6SESQXGixMJV0L26JeNchN0//YW6Gyh4ZEfhCv9ANYDU2YcQHt7G5OnH0C9203ZaT1pfFL6Vf/glUzYVwepby8XWe8A9paGuL5+mn6J5VB73yIdDuF2U7Hi1Az9Aq1hE2deRP3aK/F71
cdNVRqpvW8RkeIgxCRYcIRlY48vIjbzf6hdc4nRrxHCGF97MEfOv5XX1tfii52x3fN9rM31HDhtYrjtjc31uJ7HU9d+s3eqLpSXpLOlASwHAZzCUmInLCRSomMjqu86n9iJF1MwaTqd3ckw+Lzm3oU4ojjm/KW8fuci1v+8b7mO3v195dm78Ls68Frqw+2Wk4XgZwjetuhP1C6bN7NPTqDJ31nB+pXnc+0967bbDqBGRE9DkhZ0GhZs8zJrR/oeWE44QiY41nYcfF+wHP1IyQCnKQyGvZUlC+ZQueHtML1BCuX7jKtY36f0je+5vHTdyX0C5ZWXpLslKNlj2Qg2TmEZ5af9GICaey+i9JQbcJuqw0z3qe22CO9fcFsfrdhWf9evPB+3o6mPfmXHpiCWtcOUDNvaf9m8meHqQiD0OqX3a1v61V0SCxYbBPnLlNJvA0OqX/1K6ZrC6NcIYYyvPZh4Wzd2XozpZ2ZO0a2/84I+cV2upxDL5oAL7qF9SzVeMqED3osmUXf/9yg/4yf4bgK3pU6nllB+GEwvThZ1D15Ja0EJCbcn3kIiWXS1buG55Yvx2vomG+zNi7edx76nLqOyXpEdmxy67WFkS12IZWGnRCcVMyHoHDwBViRLB7daNtHYVMR2yCrQxq3tOEyefgBVG99GobNMKwXtXdqga4q/y5IFc0yuHIMhjfb2Npy8WMZ0G2SGDGQgwowL7qcrXonvBlU2fI+GJ2+mLEiNowINiz++NIyNkkg2tfctwtvaHC4aSm1PtG7JCHnYHqnn9zszjwxjwkYLeuCqVzumVj1aYQFwjTiRDP0aV76fXhlu9GtEMMbXXkjuhEm01GwKP3tbm6lZtTAs4up3thJ/fClWTj5ls68NQwaS7U14XlpW5Q6frAlTmTr3WtYvnzfg61uRKN3Vb5I95aDBvK1dRpSvpyiztXtdZ63WK6DEyQoSsGr8zlasnHwkkg2q7x8I5ftEYlOxc4vwnSxUohPJzmPT+rf4zswjAXAsoXzavjtdPsVgMEB2bIourxN4erytzdSuWqi9OPRoWP3DV1M6+9qMhUFeR3OafjUTjU1j0twlbFpx+ojcy2CRm5vH5gevxBpXAOj7TNRv6KNfXkdzkGQ6u9/zpOvXxFNuoPbBK/voV6pmJGA0bDcwxtcYRCJZ1Ky5HCuWH26ribdCelyA8gmtKuVTesoN4S63pQ6noJS6B74HAirZTemsH3L4AZN4+e1qIsWT9DlXLWLisfPp3FKJZfW/iqc/rEgW3VVvDJrxlZub128iwN6xCttqN2WGdvenhKSpqw3JziNSPJnSOdeRCspNxDfT8MQyys+4iUT9euofuhoAz3Wp2vg2nqtXUOrM+Ao/0Un56T9BeUncpuqwfuWmFafvUvkUg2FvwIroeM3u2MTMHWneMOUn0eKkeumXDu50W+poeGKZTpDa3UnZaTei2urxXJdIkKS5ZtXFxI6bT3d8c7+rqIeTgWjY9tpcsmJ1Rj3Mhq3NoX7pcmza81X10zOYdNYKEvXrg/qQ3rb1K9CwstNuzNCv7vjmcDGB0bBdxxhfexjpBbOr4q2QnU9ntxainCz94yw78UKs3y4NY7COnH8rVXFALFrrgozHqZU/wbJrKxLVOraDwtERxyYnKxK+P3SGztZsFQ88KDM7N5/mvz5CxxvPh9uS7Y1Mnb7fgM+RzkBHWal2vYv2pt6nROyyeTPpJMqWB76H6wYij8LraAag5p4LcdsaiJToQNb4uhvDepNeRzNOfgzJGo8b30ztfReHHsVU3rKeUkUGw95F+rPXFK9HsvNIJrp1rGmQQ2vCiRfS/dtbwlin8BjbprM+CE9QgNNTCkiHBagd6pftOGGuLseJhHFW3QOIN00hytcVPfrZvqsMRMPS26R/j+3tbWHW/5SGnXfiEaF+ie2gPA/Q309Kv5y8EhBLF/AOSNev+oevwu9oovb+xRn6JZZDfpZJs7O7GONrDyO9YHb98sW4KrNEzraOKZ99DbW/WgaWBb6vXfFBnivl+yTqNgT1E/V0m9tYhdsaDxL3+cjOZIrfDrG8LGhrx8+y0CEI+lcwN7bfsLmqB+J1Ovhs/R1vfueNjJFy2Wk3BiPIeZSdcj21QX1Lt71Rr5ryXNzWeOhUTBW5BQnLilT97KwhujODYXST/uy9umI+nlI71K/UMc03nU0kNlUnQxWBVPZ339d5CZUCJ4KTX6rL6bTGqbz1dJTv4zZWEc3O3maOsJ0htfKxN7mxA/ppPTTsSMMKJ5Sx79nLM/RLKUXd/YspnbuU6jvOoXyeLrBdc9//4DbX6lXrKf0KKP3GD8K/Cyn9qr3vYsjKwbB7GONrDyaanUNXfSUVt54ByscOYhksSzh0n8yRnGMLWDZ1qy/FHl+kM8+nBZRjOzgFpbiNVYBCIllE8mPMOGsZ6++8gIgzOL8qqRqS/3yrgtv+m8+HTzhlUM47lEgYuCq4TdVg2Sjfp+a+/8FrrcceXxTUjCuGjkZKZ1+HnT9BL/Mu0bUvkw0VYT4hg8EAdvY4EvWb9ABPeViBHqXHFKVjRbOpvmtBGHeZrl+R2D66jqGbxG2qQqHCkj6bV56HHYlsN0fYzrCnxTOF+uUmUL6H21Qd6pdKduF3tupFR2n65RRPpubu83GC1aFGvwYfY3ztQejpw1bqly8Ot2XnlxDNzqGQ9n5TPaSOiXUnKT5uAfF1N1I6d2kY14WILkGRgaCS3bhtcdYvn4fn+9p4QxtxqQSqyfYmXr9TB3PG8vpPSrotPvSeqbh/eGmnjhludKkSlTZNqL1/kcIyHbTru0ycdQ3R2DQS8c1hwW41gOmH5i21fTJaAwNadWUw7Kn0/r2PBnVa3bY4t657sd9jWhribH7nPxQfqwdu8XU3EjtpMU5BKWJHqFm1MPMAy0Y8hdu6hU0rtOcrlSzVtu1wZXWyvXFM57Lqq189pPRr0lkrSATfR4Z+7aDOY0tDHF/5fTQsPdGsYfsY42sPIjV9mF6XEaDi7oWQtnglPS5sU01cu/X7lLVI5bNSWNEc6tdeHiQObcTOyYeuVg7br5QXbztPn+/pG3g9ODIVmvqBGRNCT9auYG1t2OVjh5JUYGtTvD7QoCB/ju9T/9D39agb0TFg6eU++hGscDrFczNKNnm+QrAo+cICgDDVRf3aK4fqtgyGEUeJ1Se1BJCx2jA9nql5Sy2er8vn9DqT/tdLYkVzqF11MV5HIwB2bjHKc7Eti1vXvajP9/RP6F31cer04Qt1GG5yc/OouP+SDP0CrWF1a69ALFuviIxvDnJ/2Rn6lV6BRLmJDP3Ssa+6+HdhoF+gNax2zeVM2MXY3b2NQTG+ROQLwM2ADaxUSl3fa/884EdAVbBphVJq5WBce28gZUxVxVspcT2SQdC8ZVnkTpjUp316XNjm687Bzi3GKdLtxHICN3RPwP3Er19N7X2LKDlxEfHHb6Bk5iIaHv5+eL7dMbC2xwemx6hZ/wbl+x44JOffFXoH41s5usC3FR3H5LlLqLj7QmInLqLhqZtJNtcAenUovkeysVInXvXTcqHZ
EZ3M0HZofOY2nXrCsrBy8vHattDw1M2I5VB20sV4noev/DB4Fsyy7eHCaNjQkf5M+b4XBs2LZZEdm9Knfe+4MNxkj3450aDodaZ+AUFG+DZiJy5C+e6AKmaMNVoa4hn60Vu/ADYuPx0nf0KoX0Af/VJB9ROxnFC/orFp1K6+pI9+6YY2E2fqRQj9LQAw9GW3jS8RsYFbgc8DlcBLIvK4Uuo/vZquVUot6HMCww5JGVP1yxcTLZ6MH4xQ3KYqWmo20dXaQM1WmyPn39qvoSTRcVT97CwdEO771NwdBJZbNlbWeCae/P1gJYsunh0tnkQktyj0ng0VX//ke/jfdU8Ou/G1vSXb6cJftfFtrPxSxImGJUms6Djq1l4epA7SSWpTAcNWJCtcEQSiU36sWhjmHQKYOOsandw2vxS3pZZobBrVdy3A8zyyYtOI5BZnBNKaZdtDj9GwoSX9mWq5+RycwlKUUrhNVWyt1f4o5etBR3/TfxLJDvUr1Tb++NJM/XKioYZFYlNRbqJPyZ2xxLY0TPnudvVLN/Lw2rYAQqRkCipYuNDzHWrDuLd+pWZGJs6+NkO/AKrvWmD0aycZDM/XkcA7Sqn1ACKyBvgS0Fu4DINATpYTlvjxLIf80im05peEJX76o3zOtVTddT5lp92I17pFz+l7ujRO3dorqPrZNwGoX3t5MMIcnmXEZSX5WFtfHZZrpbO9kVj6qBGCJBNuAuXpYrux4+ZTu+Zypk7fj03r39IpJgLx8pPdSCSburVXpLJThCeJFJThJ7vCFUOpnal4DM9NYhgxjIYNE7atUz0kE92IFQkzxUfzY+x79vJ+/1jHjr+QLY/fEKw0juI2VumpsDT9EssKvTXKTQyTgo0c2ytTlE5v/QJdy7JHvyw9tRik6Qj1CwhqFIX6NeHEC6l77Aac/In01i/DzjMYxtdkoCLtcyXwkX7afU1EjgLeAhYqpSr6aWMYYlLu+2RjJU5BqS7UatlMnncTNfcuZMZp1w1rf8rHeXR2tJEzfnQGvDr95QSKTeSSFav59nEf1DlwQMdLpDJp+x4fvuIXALy85OsosULh6o0ViQYj+rH+52JUYzRsT0EpnKJJGfrld7Yx+Tt3I2JRc+9FYR4vw471K/SIDUC/+sOsgNx1BsP46u+vRu/I418Bq5VS3SLybeBe4DP9nkzkXOBcgNsXz+LcL31iELo4Nohm51Bx90KSrk5457Y30pJXRDR72+tLlO/TXb+xZ0NaqY1wCbLvkYhX4LY36uD94FrQPgR3kcmnD5nCU//8Iwd98sQhv9bOsr2VUUsWzAHbpmzuUiCtIDdQdfs3Wb/yfJq31OIrHXdh5ZfqVVh2pCeI1fdIxDfjdTRRt/YKsgomYGePG/4bNQyahqXr16mLruGok+YMdl/3aOzscVTfcxGum8TvbCOaVxxu749Ee1NY9B7oo186/kuRbKhEELz2pjAB6u4kPR0L7Ghlp9hO//r1s23oF2gNS+VlC/QLdBB+9T0XGf3aCQbD+KoEpqZ9ngJUpzdQSqUva7sT6OsC6Gl7B3AHAC8sH5zMnmOET5yjA0tf21BHzoQprF95AbGZi8JtNfFW9j11GVsaW3qmIH2XhnU34nU06xxVKZQi2VChMxoXlJJfOoWWvCKOOX9p2GRb05iDyVGHTOPelS+MSuOrbOq+4fvu2MQw4zZo975YDm5zHUBYZw7fRVILScXCHpePlZ1H7f2L8TuadC4iCDJGR3CKJ2GPL8Jtb+D9C24blvsy9GHQNCxdv+7843qjX71IJS+u2vg29b9cSsnMnvQ4VRvfpilej2NJaCioZDfxdTfid7bqHIQp4yvUr0aU7xEtLMOJZhHNKw6fo7093mh7+gWAUn31K/gEffUL0BrWUJGhXymMfu0cg2F8vQQcICIz0CuBZgMZmTNFpFwplVpacRLwxiBcd68hlpeVYQjVxFuJ5BYhTlZG2olIbhHN5JDwW8NtTiRK+ZcXU7n6cqIT9gGCcmheT4yR11rH+uXzsJSXcZ2dzd21K2RnRRhHF24ygbOLLuy25kbW/Oi7zFn8Y3ILinarPwOtEwmQE5tM05PLAMIyHl57Ezml+7Dv2Tfy6or5uG4yXGVUdf8lOlCYXgV+fR98d8DXNQw6RsOGkP6eqaZ4PXbeBLKCgO0Uyvdw6UmeakWilH75e9T/cinYTljSKxVnaeXk43U0U3XHOYD2du1pubtGSr8sS/roF4CIsO/Zy/voF0DFPRcTf3xphn6B/nti9Gvn2G3jSynlisgC4Gn0Mu27lFKvi8gPgL8rpR4HLhCRkwAXaATm7e519yZ6r2DsST3RFE4Tgp4qTHR1Uj77Gg4Oai7WL19MzoQpKN+jdlXPKFN5LhHHJmopOp/+4fDcyDb45HtKeOvt15h+0OEDat9brJ7/xd24G/7C84/ezfFn9k00uzPszLLo1Cge9Kg9/vRtuG4dnVsq+fuSWbqKALC1dj1iO0z8+lUkGyqJTJxB7b0LKTtjWXh85YrTzbLsEcJo2NDS3+/0kgVzqNj4bt8aiZZD2ewfhvFJr66YT1ZsGuJkUb/2CuxcbZwoz8VxImTlFpCTm9PXqzPKSdewl55ai137Cisu+joLbnpotwywndGPVAki6NGv1DRif/rlFJRSctx5Rr8GiUHJ86WUehJ4ste2K9PeXwJcMhjX2ttIT5has6WJpEq53QEBr60JlE/+xCl84pyreerab1L92FJSQ3Svs5X1/6cNtLK5S8Pi251bKjl0RumwTC1uj3hzO4/+5s/kHMCAja+XnlqLU/caLz65hg8fP4s3freWZUdHuPh3a/jUV8/c7dHjruIntmLnlTDprBX6c7Kbuvu/h1M0CbelLoyxU24C5bu6Rl0a/dWLMwwPRsOGht5581JxRCiFEki06cSoKI+ciftA6xbqHruB+mAFnt/ZSsVdF4DlYOXkUT53KU40i+745tBAG6zpxa6t7fj+DmaKleLdfzxHsq1JH9O5led//Us+dfyXyd5O7G1vXnvpTzS+9SL3XPFNGqs3cOwMeOffm1hz/UUc9vFjMi/pRBERJu53CMXl04lmZ+M4kZ2+vx3hJ7bi5McoPWUpViRq9GuIMRnuRznpCVOrblrEpFNvxHIiJBoq0wqdLqKjvZ3XNtSBWJSdptuICIl4BZHYVCpvnUfNqouJONqln2xvworlD8vUYp97am7nW9f/nDsuOY1VT7xAa30lVR1/4bPB/t6erfTPSine/OMvuPUrk/nWYw/zp3UP8NXJ7Rw1I4/jN2wdFO/XYFC7+lL87g68tgaq7zhHFye3LJTvYefFsHLyiBRPCUO9xbJoaTClhQxji94FoP958zlMOvXHPeXNAmrvW0R3e0uoXwBWJItEfHOYC8/Oyad61cU4ToRkeyPdsYnA4Ext1Wx6l/+uvZZD95+8/YZK8bmphfzsyd/yo/NP5qFnXuEtv4Z92v7Ftz7/6bBZQ2sHi1c8wo/OPxmlVPi+OG8cDa0dfOvx17hpZhHz1rzGRZ8q4qjyLkqzojz01sss+M6nKM7rCVyvb26nO+n
zz/WP8e6fG/lHLRz8iWM55JNfQGRoVkkPWL9AZ6OwLJri9SxZMMd4vwaIMb5GIenervRajt7WYGQhggCWE6En07PTp+wQAJZFMl6B8l28rc2QVnx7pFj1xAs01Vbwo58/zc+f/AsPn17KrNUV1FZsoGzqjAzP1mfmzM/4DPDFA2D/iTlMtDbT3t7CnINziIjPWR+K8vXH1/Chz36ZX91+7aDEUGyL3rEVqVJEuvQQqMRWyk//iX7vJXAbdfx23YNXEjv+QhAhGd/UUxxYrDGdFNKwd5HyeDXF6zPq/3lbWwk1K9CvlAGRnqA4FfQtYgUrgpvxtzajAGXZWNITb7S7vPXS76mt3MTsow/ipI/vOOHzsvt/Q1djLY8++w9++9dXyI0ofvfXVzh/1mcpKRgPwOqn/0ZXYy1Pv/AaQPj+4rnHsvrpv/G1AyO8XNlFQZbiU+VdzCgUzj8yypPvdPLos/9g/teODgeoHwgMwiMPnIpSipITL2daVhtdh31yt1L0pGtYj37lIdJbv5J6sQP96BdoDROLSG6R8X7tBMb4GoWke7v8YGVjZ7dL5e1nA0HAPKlSXFqkkq1beOsmXR/NbdErWATAsojGpgLClFOu49AZpRnXGu5px3hzO+v+8BK3fTXGcXe+wKRxHs+/28Gx+zqsWDibBctWh56t89b9goM+cRxv/P5hrvu4cMmzD6KwuODL+cxd+R/W17Zz5gcjFOQISddl/5Iox0/dyi9uuYKspjcHJYZiW/Qe3aViWEAnK0zVSRMRXVczKM8hloVTPDkc1Tc+8zNUYisoP6O0kImfMOzJpDxeVRvfJis2DTfRjQKqbj+bnrqy+h+d5kDhtmrPr9tSG55HxAqeG5toXjElMy8OpxtT7Oq049b2Ntb/6888tfJ63t5QweUPXLbDY9L16xv3v0CR001SIC9rK59f8BOeWbEQpVTY5txfvkjS87nq4zbXPvciMz/1QR793d8Y57ezPt7F2YdFKcqGwhwhYsEph0a4+9cvsLUryeb1b/PTR57jirN6EqeKCBWPXMnNj7+8S/ecTrq+9OhXQO86tSJEJ84I9UvsCGJZoYahfBJtjTS1YzRsgBjja49h+7EIYtmUzrqGLY//KMOd7zZVk4hXDGsOz/RpxZKC8cSb25n7/f/jjQ21fPWYD3J4UTvjI+PItV1u+Fw2lz7bQltC8DtcfnHLFZww3SWnbSNHFfn84pYrOHZyBzPyknxmYhN/rVQcf/NGDiix8RU89B+X2/6eJCci5EQstnR4+PJ3Vs8t41sPVQzpNGTveJYUuvJQaj4x84tXvk91sDIrlZG7dFbPgodp++uR996+TN4wttiueqVKdeXHdCB9QRmpoFa3qYpkvKLPczQYvPWXX/PPX93F7248m/zx2RTk9sRspWuYUqqPfu1TmMdHSxP8abPHbSfm8NW1W5mc38VPH3kOFBxe1I50JTmsaCtvxBX75IWJH4YAACAASURBVEX59KQEZ/7wbl7bsIX3xWxcH9b+O8nyvyVwLCjKERo7Fd0ePPLMn7n52CiX/OYvfOdrx4QeNYDxOYMTKrJD/Uqv0hGQrl+pz6A1TCwHy2LQY/HGKsb42gNJyZBY0meAYtl6GisV8+VZNvmlU6gbRDf99og3t3PsBTdRIFu59aHneOmNTby1uY7qeDO5EXj82b/xwFey+Okfa5n7/gjleUJ+VNHt+kwrsIi/9SIfP6iQqHgcPsHjrif/ilsI/94c4dwPOdzzUifFOcKCD0f43m+7iIhi3yKbdxt9GjtdCrOFme+xKbVaOOuwLFY+vXrIgvB7x7O8vnIRnfWbqPjZWQAkt2zUIqaUTkyoFGJZTDr3DvQflmri627EKZqM21TFjgxsg2FPpo8zxbK016vXjlSwt67+4JAzcRrR3MF/fgX44ZmfY/KEAqy0tAkpDcvxOzjmOzfSnXD76FdFXSP5UfjC/g7PvJtkWoHwkck2dz36LOOys7jzePhPTScPvtaNJYoFT7os+ngOt/ypitg44bxAvzq6tX693eDR3CWU5Ai17T7HTlccPSPKZzd09vF+DRYD0i9FqGGhfn1rZfgzS2kYEKSeMKETA8UYX6McxxZaazahUPjJRFiHMTUyFMvCTyb08+F7KMum9r5F4X6vo5k60Uuzk67LG5vrOXDaxCHr708f/j2tTQ3c8vUSzvnVCxTYSbo7EkwYJ7R2K762n9CehNtf6uShk3M4/8kuNrf43PC5bE5/rJPiHHjyP2388JgsVr2SZP9ii43NPuOjPr951yXLgXMOi/LvLT6TcoV3mxVXHRNhyZ8TNHQoynMtTj7QJjeqmPeBCGtfbx+2IPxU6onUiM+ORMiKTWNr/aYwP5FGqH/4avzOVvzOVuoeWIzydBbvttyCjBQWBsOejG3bQfFsHSvRo2E9+gV6e6RkMiI21Xf11C73Opq1leR5YNv4rtvfZXaZ6eUlGYYX9GjYJ9+bzfMbmuju9DP0y7GFo1c2MmEcFObY1Lb73PnFHC54qpPWLkVpjseBsfFc+VyCsjxhawI2Nvs8/t9usrehX9f/WeEr4bsfj3Dlc93MOjgCSnHWh6LMfbyv92so6K1f0/Y/KJw23lq/KYzNs5wotQ/87/+zd95hUpXn+/+8p0zZ3thdOiwdFEGNJYmxN4pYQIqi2GNERfhGEzFRE6PGxIJibIgUERBsSFOxRKNRVJSq0l1g2d532inv7493ZnYXFqOyyw+Tua+Li51TZs7M7tzneZ9y30grhHRs3GAt5UsfROgGQjPoeMP0Nr3O/xYkgq/DHP265KopxrQ8jLQccs67Nb6vZN6tSIx4o2rpojsBlQo20vMouOJv7HruFk698QHefexW0tp3I1i2u8XXaQ2UV9cz/42PuPoYD4YTxHRC3P4rD/e8L0BCdUjy8lc289ZZ9M7WuPufYWoj0DFN4+sKlyw/WI5g0SaLBRtUKXHRKD+jFwcZf6TBH98L0z5V4/w+OtctC3NCZx2Ey+o9Dqd01Xl7u+TCfgY9sjTKGiQFmS7jjvDwxIoX2PnVl4yfOu2QyVCkpKSya8EdmClZROoq0JOj+kTRNL0bbiB//IPYVUVxv01QU197dm5JTD4m8F+B/M4F7Nm5BS0tDyklRlo7cs77LQClC+9QC0ddcZhTWxY/T0rIH/MXqpY/FFdOXzf9ejSjbW9ZMQ4be6TJqm0h/nyKhz+9H27GXy+st+iarhG0JQFL0j5FcHR7VXHomq6xpdKl16P1+D2N/JXplby4waJ9Wsv8dXJXnQ2lLt9UuIw90iTFIyius+md4+H0zgH+/vybbN5VEm/laGvEmvGrykubcZh0XaR0cSNB2l/2MFblHnDtOIfF+EvX9f/wCgkkgq/DEC0p2uNLQxjeZv1cwvCQO/qe+DahK+2XvXNuQdph6ssaHVLivpBRiYmmr9Va+Mfi9/DKMFcPTuHx1UGG9dIZlK9xUX+Tj3fZJHs0NpW5pHgFt5/k5aYVIf5yupfHPonw4oYIHdN0Mn2wrdKluF4ysr9JpzSNS480WbRJBWMj+hhc8kqIIb
0M3t3pMO0cH79ZFuKeU728tMni+XUWs9dalAckhoDsZAM3FEIWfsKjN13ETY++1KYB2MYZUwiVlwKgCQ2rvhJpW9h15fG+FbtqD0QFDAFKF9+NDAdAgBuspfT1B7EdKzG2ncBPFvtO0gmfmsrTfClxvlKTwbXxMnyMvwD2PDEB6VrNnlP3JVG84I64xETT12otxDjMo+mc3l3n6A46F/Yz+Xh3I3+legR3n+rlxuUhpp7k4R+fRnhrm01tBBZe5GPUoiAVQcn4fo389dEum4qQbJm/lof40ylelnwTwnahJix59gsLV0J+SoiAJan68n0GtPdx5o0P89Zjt/znN/Ij0ZS/oJHDXCuMrFcOW1bZTnCdqEuKBE2ndPFdyEgwzl/SsZGhugSHfQcSwddhiKaK9sdd/zh7yiFn2BSklPFJxooVjyJdV9nVaE3sODx+cB3cYC17599OWju1Ion5Qm58Zgrbn2/9ElxsxXjlkYpA395ucf/pXnKSBOMHGizbbFNUJ/Gbgov6mXTL0LhkoMnGUpcTO+u8t8Pmgr4Ge+skXl0SsGHS8Sb1EcnVR5vMXWfh1SSzv4xg6oKqoOS8PgYndNI5s4fBO9/ajD7CJNkUjDnCVI2sn0YIODYppsbdJ3u45Y3dbV6CdEIB8sfcs99E1uf3j+aY3y1k3fTrScrvgdANNNMLUSFJpW2kGoyFZiBdi/r3El5pCfw00fSGO3H4CWQPm6LSWboR5zCnrgI0neJ5tzU7V/P4ka5LycI/kpTXNb59wNUPsn3GjW2mZh/jsNF9DN7aFuGvZ3jJ9gsuP8rg9c02JfWN/NU1XfHXpjKXX3Yx+N3bIcYdaXJkns6ZBTpvbHOYdILir18fa/LCeotARDLziwg+Yx/+KjB4b6fN+X0N3tlhM/+iJBZutHhsdYQdVTYZyV7SPBZTf2nym9cr+Mfid9FTstvkM/jB/IWaSHWDdeSPf6gJf9l4vF7q33i4Ta7zvwGJ4OsQo6mGV1PEMlD77ttTXgv+NNLad6UhFCHWbi+tEHkX/6nJ2K/avnf2LeqL4U/DY+jxoKutMWfZR3iJsHCj5IlPQwzrpdMpXfVSZPk1zuiuM2edQ26yxog+BkLAb35mMnpRiN8ca/L6NzYj+xlsq5LcvNLikiPVqrEsIKmPSC7qbzL7SzUV5DclL39lsfLSJGwXLh1oMGJ+kKAtsV1YsMFCAj0yNXbXSob31umZpTFhkMHMlS+0WgN+Sz5qVn3ld6bcdV8SRbMm4dRXxa080HS16hfqZzMjD6e2hNbtbkkggdbBgabk6irLSM1qt992x7FJyi8gEg4136Hp5F38J8x23ZR2oYhx2KRoL5g4pP2PMQ6bs9ZmWC+djmkajoQMn8Zp3XTmb2iZv274mcmLm2DCQBNHQmGNZNyRJh3TNMoDqjH9ov4mizdZ1IUlQ3tqLNhosSrKX5cdZXLe/ADVIYkmYMjzDZiGoEemxvYql/qQxS0neMj3W1x1tIc5Kz/iomFnkXKQ7/dg+csq3wWOjZ7VUXGYYYCmY2TkYVXuIb9zAdsP8hr/m5EIvg4xmmp4NUWszBjb9+EzdxIJBXEkEKhlx8zJuFKieZPJH/sXdVIbqRv/GLy3ZjO25mNHWR0eQ2PhRpsXNzaGD5YjOTJP4+yeJnkpgiRT0CFVMKyPwbwNFuMGmpi6oFuG5NsaydLNNks327gSKgKSDJ9ACDA1qA1Dl3SNeettJgwSGJpgZH+TRZsiSAnn9zFYU+zy0Nk+zl8YYEhPHY8GVw02eXFTXatlv1pKp0+dMIz8zgXNtm2cMQXXdfjs/jFIK9yYqYw22eM6SNdGi9l3HPSVJZBA22HfKbkYPr9/dHz7xhlTcEIBAKSU7HruZjX16/GTP+7+xpMOQw4ra6hl/gb3P/JX+xTB8D4Gc9dZXHqkidcUVIdcNpS5FNVLlm1p5C8JdM9Q4tgvfe3QK0tj3gabKwYpfa8RfQxWbLWJOJJkE/JTNB4d4mPovAAaLmcV6GT5VRVh4cYQn3z5DcPPP7j3+2P4CxnVMdT0aNkxqivpOvGhoZgNUQLfjUTwdZgiEgrS+YqHqS3Z3Tjqi2pSLZo1SU0BNYGMepJJKaNlxzosIfcTUW0rO6ElD07kglsfx+s0sLteo32yxvxRSYxaUEdxvUu6T+C6gvnrLR5fHcF242+LsC1Zs9flyc8ihG3okiaYe4EfV0KSCc+vU1/y4nqXDwodLBemnuTl5pUhlm+xqAlBuk8QtMBy4eWvLX79My/tkgVjjzC5/Z0QTwzzc3S6xpgBOjNaMft1IDS9+UTqKtGTMgCVBeh84/MUz7+d8mXKmNYN1ioFaU1HHCJJkAQSaEs4oQAdJjwCQGDvNoRuIpGULryDvbNu3n+BIWNiq4rDpOuCY++XmWnN/q59seTBiQy95TEMq57KoOSNy9P+I38JIGhJvih2eXqN4q/RAwwmHqd4VgJz11lsLne493Qfz66J8PLXNnf8yssNy0Ms2hgh069R2iDpliEoqoOygOTSgQY9MzXO72vwdZnDH94NMfM8P1l+GHeEwd8/2cpptdUHpXD/XTgQf2n+NGQkgPAkxfkLTY9zWNO+vQS+G4ng6ycAMz0Xv9ekIWShJWfS4fKH2f2PCQjdVDftJqtHp041RQoBHdtltEl/174or65n1O1Ps21nIc+N8HPFqwGG9/bQLQ3O7mEw68sIVwwyOb+v+mKm+wRz1lrMWRth1WVJgGBHlcvYlwJ0ytLYXO5y2uwGDF2gAdlJgtwkwTEddDqkSs7sYfCzjjrn9TFYsN7igmi58pedTZZstnCk4NgOOqUNknN6GLy40eLCBQFSvYJkj8Dv1seti9oKsZuPHQkTqS6ONxkXPX0Nrh0hd+Sd2FVFIKUisWjQjCawKvfg8R56z80EEmgTCIE/V6nca/408i55AKGb7HliAtBEDy8Kp64CoWkI3dtm/V1NERNUveb8X7Hu661MP9fHH98Nk2K4P4i/xiwO0DFTY+lmmyWbHQwBtgQN6JmlkeETbK5wueRIk9MLDC47yuSJTyNEHBXc/e1MHws3Wsxfb3FCZ52vy11G9Te59BWbNK/k+Bn15KZoSCkwgc/ffImzL5/UJp/JgfirZN5tSNchrwl/AdFATGm2WeW7SOTv/zMSwddPCJoQ4DpYFbvVylCq5VflqqdwI0FACd3p/jScQDVllTWH5LrmLPuIbwsLuaCPQbYfLuxnIpBUBBwu6mcwZ12EFzfZzFprEbTU19KVkOmDqiCAxJGQ6de481derl8eJGKDJiQj+5lcMdhDTUhy26oQugaXDTTI9gtG9Td59Wsb24WIo4I0UxOc1FWjfbIg4kJ+quCaYzw8uyZCQ0RSHZKYHpMtX3zYJsFXrI/Cqq8kXF6I05IuUVR0FU3HzO6E5kuhdPFdgMCTmoVVX0lmTm6brvITSOBQw/B4EUKJC6PpisM0nYrl05Bu4/dEaBqaPy2+kGxrxLxmr7t3Nhf1MyjI1Bg30OSZz8MH5C8pIcMHlUEQUf7KStK462QvN60MUReWpCYJqkOSU
UeYXD3Yw9pih28qXf4xzIcu4PpjTeatt6gOSnoma7y302ZkP8VphlDx6MA8jYv6mQhg8VcWRXUuQgh008+2tR+3+mfxn/hL1V+a8FdWJ6Rjo/nTKFn4R7zpqufPqq/ETnDYd6JVgi8hxDnANEAHZkgp799nvxeYAxwDVACjpZQ7W+O1/9shEUQq9hCJLgydhmqK59+OdCxKFt6xv0WNZuDJyCVSWUTF4js57vrHm01PtjbKq+tZ+MZH5PhVuh1gwiCTG5aFOLa9w4wvIozsZ2JqMOYIk9tWhbn15x7GvxokbMNZcxtIMlUf16UDTQbkalw12GTOWouABS9utFm0yUYXkj45Gmf2MMlP0dCEWk2e3UPnw10OD5/t4y8fRLigr8HstRafFwWpDLnYLvh1yPBrdElXgWnOyVe1WdYr1kcxdcIwOnbrReHWr6LNqCbStvY7XgiN3FF/ouSFW9GFYODEJ9p0oiuBlpHgsLZBJBxCSpdAsfINtOurKFn4B0AJP5dGOaypxZbQDYz0PHY/eSVTJwxrU4/AyroAS//5KVNPTeHKhcVcOtBPuk8w9giDixcFWb3HbpG/Lns1SG0ITp/TQPsUQUVASeP0zNa48XgPH3xr0y1dY94Gi0UbbZZvsQlakgmDPOQmaxgadEjVuLCfwaKNNtPO9jH25QAndzMY0cdg3EsBHAmWA2ledR+4oI/B1lqD0/pksKo8l3OmPtPqn8d/4i+BaJ7Tit6X8sfey+7HL49rsiU47D/joIMvIYQOPA6cCewGPhVCLJFSbmpy2FVAlZSypxBiDPBXYPTBvvZPEftqeDXdDo2N91Z9rRJEdW2E6cGTpZzt9eQMhG6QPXQy3pzORMp3Ub7sIcycrliVu/Fkd0QIgdB0zJTMFicrWxNzln1EmqznzP4m3TPVdKIAjumgceVrQbpkaLz/rYWhwbXHeDi5m87NK4NoAs7tabC6yOVn7TWWb3O45QQPXTM0fvMzD9NXW1zcX2fJZoe8ZI28FI3P9zp8VR5h+uoIoFafEUcy5giTzukag/M1XvnaxtDh9O46G8tUUFcfkZzWTefVb2xqrRp6prVN1uu7IITWYiJeBc8Sp74K23X49N6RCAQTh59AenZOwpz2ECDBYT8MLU3JARiaiG9vzJxYaIY37vBgpOWQM3QKJQun0vnGBQAUzZyIJ7c70nWxKndjZncGQAidgqsfa1OPwFfeXcOwnhpvbqzk0iNN/IagJqTyO8d11Hl9s83qPRaZPtha5fKzjhr/91YQXQNThw5pGid3VQvAkf2VBMXlRxm8/o3N5wGHYT113v3WZcnYJIa+EOC5Ly1mrbXQor6JMf7SNBjS02TkiwE0IdAE9M3WqA5BhhdcBLYLa4tC7A4GCVP8n95aqyLOX/vOR0TN0V0rjHRtPr13JLHDEgbb343WyHwdB2yVUm4HEEIsAEYATYlrBHBX9OfFwHQhhJByX7ev/3583yzUcdc/TvmqBygtrcZIzYlvF6a3mUDnvrAqi0C6SNchVFfFnnoouPQhclK9rZ4BK6+u57V3ViNthwUbJC+sV+KmsbS8x4CHz/Yx9qUAuckaP5/ZgNdQq7mCDMGCjTZPDPVx61thLhloouuCkA2ZPo1zexrM32CT7hOc2t3gysEmQ+ap1aDjxsxJJElR3Z3uGRpjjzBZU+zSL1vw5jaLrCSNO0/xcv+/IqzZ63BhX4PZ6yWXTN1/UqstIYSmZCVcB+k67HnyqvgoveZPVe/EdeLmtADStdENg8IXbueGoceS0S6/2XMmCK1VkeCwH4Dv83d338Sx1K96lIrSvc34S/MkffeJjh2XYJGuw7rp12PVV7aJWGcw0MDnq9ex6OI0LptnUxmUzF5rkekTlAckugYFmRq6cKkMSorqJB/ustBQ24vqXJJMWLTJ4tfHeumRpVETlmT7Nc7tZfDWNpulWxzGH6XKhq+PTeKMOQ1oCGyp+MtvqkltAUw+0eT1zRbDexvM36Ay//ec6uXmN0LMOt/H7W+HOaeHwee1Hi46/Ret+ll8F5ryl11bjnRs9vzjcpryl9B1pOvQ/pK/AY385dg2hQv/EA/EYkjwV+sEXx2BXU0e7waOP9AxUkpbCFEDZAMJD5UWcCAtMM3rJ/usGyh59f4oSe2ifPk0nPpKrMrd4NhqIaIboCmtr+xhU3ANnbUL7mj1EuScZR9xcscI1x2VTKZPogt4bHWEuWsjZPoFZxZ48OrQLlnj/37u4cYVIQJhyEqK2nEIl3UlLn4Tnl1j8ewalSFDgO2qPooeWRrv7LBZ8o2F7Uo8uuC+071MfSfM0F4GuibonqmWYx3TNI7O11i0yUIXgtO6GxzdXmd4H0WEL0d7KVa98A/Kdm9n7K1/b7OJx1h2YN+bjyenK5HyQjR/GoZhYtsWQtcRmkbFyum0v0xJjUg7gunxoiWlYxjmfqP9bZkN+B9EgsMOIYSmqSxX+S7KV0zDDdY2at41HoWenEHmkMlI12bX4rtaPQDbvHY1p3bVCAVqmXW+n5wkwWOrI/zrW5v2qYKTuxqM7G/w/DqL576MkOyBJENl+Kb+Sk1bCyFwJUz7OMK0j5XHrq6pntaILRECVm13eGdHkLqwi98Q3HOalz+8G+ai/h4sR5LuFWT6BTlJgosHmMz8IoJHF5xRYNArW+Pi/iZLvrY5pr3O8+sieI0KnnvpLSaedlObTTsemL+64EYCIHR03Dh/AQjNoOKtJ8gfe2+cv6yIktZJ8Nf+aI3gqyWhln1Xg9/nGHWgENcC1wI8detorh1x6CL8wwVNtcBCWxotgkoW/oG9829XyvbLHkIYXrDD6MmZ0cZH1VMkdBOrcjdCN/BkdcDvNdukBPnems18+XUDT3/cvCkzYEmqQvCLzjrLt9qMO9JkXYlL7yyNrZUuQ3rpbCiTPHSWj5tWhnjgdB83vxmiOhgdNQdCNvTIFEw720tVCO58L8zAPA1dQGGtZPxRJh8WOuysdli62SbFI+KaOiEbkkzJv3c5XHsMXHqkwdLNNh1SBSd0Nln85nzaZ/jadOIxdpO4Yeix6EIQqS0DoeNQBUhyhk6Oa20I3US6NuVLH1T9fJGASh0KoW5MCDbOmJIw3G47tBqHNeWvS6fcw6/OG3vwV/cTREwLzNj6lfr7tiMgBCUL/0Dx/N8DULbkr7jBOsVf2Z3UsJxjNeMvM6ezupGnZLUo7HowKNq5hddCYWb+u/nzBiylK/j7X+r4TaUv2Dtbx3Yhyyc4tcAg2ydI9wr+fIqX21aFKKxxyfQLiuokJoq/pISrBnu4fJCakhy7OMClAz2sL3UZP9Bk9R6HLZVKTywnSZUaXankcgSSj3Y5nNvT4LpjTS57JcRfTvOyarvNiV29vLKxno+WzOO8637Xqp9JDPvyF0CktgxH6HGdr+zRf6bRIN1AOhYVKx9VHBZuaJRIimYwdV9SgsOaoDWCr91A5yaPOwFFBzhmtxDCANKBypaeTEr5NPA0AB899j+X0j8QPLndMNJyaH/5w0TKd2FmdWLvrJsQgPD42TvnlsZypKbjNlRhZHXE
lRAMt41W+pIHJ/LQvDdZ9fa79Mu0+PNpXn79eoBlWxwu7KdUoN//1uGxc738elmYx871cfHiILYLp3XTefiTCCd20nl7p83oASbPfRkhPxl2VKtm+s0VLqMWBakMKsPsL/a63HOalzveCTPvQh8X9TPRBNy0IsTfz/Jy+9thzutjMH21WoE6Uqnd33S8h2G9VfbrhbVhMpI0ft41nQ/ef4XjhoxpU72vjHb5FFz9GOumXx/XPdr13M2Y2Z0QQKRyt/Kwk0rvyw3Wkjv6HiDaZyFdBIKq5Q+12TUm0Hoc1pS/nnl/e4K/ACld0A08OV2iHPYIkfJCjPQ8ip6+BuHxUzz7FhXJuk4z/pLSpa0qu+eOvZa0HW/w5Werm/HXm9sdLuhrkJMkcFzJm9tsBDB9iJ/RiwOcVWDw8tcWejTLNayPyaptFj2yNN7d4VAakPTI1CiscXlzu80LGywsW9I+TWNob50bV4SZc76PUQNMGsKSXy8PsXRsEhFHcv2yEKd3N1mx1aYuIlnyjc3dp3o5t5fBa9/YnNZd581tQY7vbLDu7cWcNua6Q8JfQDMOK5xxQ9y1wK7cE/fhdOorgUpyR9+DEBpGZges8m9Jyi+gaFbbyGL8VNEawdenQC8hRHdgDzAGGLfPMUuAy4F/AyOBd/4XeyV+KL4qLI3rqEhXNWUXzZoUD7JiQqt5wyYDIp4tMTI7UDzvVnJH3hW17Gi7j/qNT77i6+Ig957ix3VhW5WLocOSb2wWbrAYc4SHOetshvc2aJ+mMe5IZbMx5giNurCkV5bGoo0WEVeS6oFtVZDth3tP8zL5zRC/6Kzz790uXl0wrI/OR7tshvUxyEvRqLfUNOPAPI0z5wTolqkx+8sImgCPLph8goebVoaY9nEYIQRSStqnapzdy8Mn39Zwep+2zX6Bslz5/P7RuK5D4QxV8nWDtdg1JUo7J9qwSgvihNKOqBJyAm2NBIe1FaRUTg66gXTdKIfdHOcw6bpknXEdsQwKUmJkdYzy193IlqRaWhEfrN1GYRP+2lUryfQJFm2yWb7VxrKhe6bg7J4muSmCcUeavF/osGKrTYdUjTe22ZzSzeCpzyLURxw0DVI98KdTVWvEinFJ3P3PEMu22Iw7wmTFFoehvRR/GQGJ48LJXXUGP1WPlNAlQ+PzdRYIeOgsZbr9/PoIAoErJalegc8QHN/RYGdNfZt71aakpLLmvlFIoe3HYaHy3biui0QitAPwVwIHxEEze7T/YSLwBmpMe6aUcqMQ4k/AZ1LKJcCzwFwhxFbUanHMwb7u/wJsR5LeoRs1e7+NejdKtfKQEiEE4dIdVK54FKEZpOV1Ihi2sKqVaa3Sgona1bThl+CXR/Wgr7GLntGS4s5qyVWDPMxZZxFx4Pn1EfyGYN6FfioaJGcWGLz6tcXKrTbPDPfzm+Uhzu6hU9wgqQtLUjyQ5VdTRBf3N3jwY4urB5ss2mST4YOSBokuhOqvQFG2hiKl50b4uWhhAI8uuWKQSUGWGuV+fp2F1wBD10AINpU5DO3lo8qJ8E0bZ79Ss9pRcPVj7Nm5BW9OF4Co1Ur0ACEAofS+ktU1VL71BDIcbKZ95DRU89m9I/GkNerotEUT8v8iEhzWdtBNE9cleoPWUBw2TWW0bIviuVPw5HbHKt+lPAGr1RSfEALtEPDXsX27cHRycZy/dtdKTu6qs2SzjUdXEhLbqlQLRUWD5LRuBuNfDZJkwrRzVHB0WneD0QNMVmxVM9WUFQAAIABJREFUzfqdUpUv45gBJvf9K8x7O23CDszfEGnGXzEK8OqQ7gEHwYzhPi5YGGRoL4NfdtW5oK/BK19bWC7oQiAldM3QSfYIRg3wMPuN+W3q1vH76fOZOmFYixwWm9QWQotOqMpmHOYGY+VcFVg35bAEf7WSzpeUcjmwfJ9tf2zycwgY1Rqv9b+K4vlTka7L3tm3NC4SHXs/myE0DbtyD059pToWdVxySgpQ3+rX9dK7awjUuvxrV4gdlRbtU5WGTa9sjZO66Dz2SYRLBpsc11GnMiijMhAGG8tcDA2O66Aze20Er65IaPoQP/d8EAYJF/b3MO0Ti7nrLDqnaeysdhECpp2rGlbDjuTe031MXB5i4iCTvGTB+f1MXtpkcfXRJrYrGDPA5K3tDoPzBf/81qU2olHUoLFgQ4S8tHqG9/a3efZrX9i15ZQt+SsATkMVIur1KF0XoWm4DVXkjbkXPa3RpNiuKaF8yQNkD1OrXF3XqX/j4UN2zf/tSHBY26JixbTotK/L3tmTVH8XEqehqtlxQjTlL1Wmko6NNyUdaP0gbOXHG3GCLh/uDvFtpcX5fQ3++a1D/3Y65/Q0eGOrxQmdjDh/ZSUJzu/TyF8/66AzcmEAW0KyCX87w8c9H0Soj0iuGGxw1twgEQc6p2l824S/pr6j+OsfQ/3csCxIVQQmHmeS6hVc0NfAZyj/yCsGe3hnh8MJnZTUw6rtDrtrJY/8O0xWsk6KcA85f0EjhzXlL2jCYYFqOlzzdHy7MEysit1xDkvwV0Lh/rBCbMpxb2Ude+67Bsd1QWi4VgTN9JB38Z/w5HWPB1+uFaHo6WspWXgH5f60+ErKaagC18UQaosVqiUjpW18HbvkZVEkXKrqgoDFLzvrvL3D4R9DfVy3VOnhLN1ss2yLjZTKt0yLGmRn+QW3/sJk/oYI+aka2X7BwHydEX1M5q6zGH+UyVk9Va/WH0/2cs2SIIYOX5Y4JJnQNUPjkz0OOUmCM3sYlDZIRvQ2eHu7jaEJUj2CqpDg9O46b21zuKCfh5e3eQl41ITQjhDsWBcipfjQ6n4J3SBnmNJzK1/6IPnjVRNqTPm7YulDqk8GomVjEAgMw6Rjt17x59l+yK44gQS+H+6bOJb6+jpqKsrj5fZYaT1v7H3g2HjyClQfl5TseeIK9s66GaehCs2fBjTyV6zR2wrV4U/xt4laeoecdGpqBbbjIqlm6WabrCSNaeco/tpe5VIesOMm2eVR/uqbrTH1nTD3nObhla8sMv2CTB8MyNMZ1sdg0SabywaanNZdZ9kWyZ9O8XLla4q/vqpwubCfwYubbAoyNLplCPbWwxkFBgELrhzsYdIbIWpCqnfs1O46L2606N9O54L+Hl7bqiENjYAnDV3T2syt47sQ47Cm/AWNHFa68A51XIy/hLYfh/2v81ci+DqMEJtyHBB9vH5HCf52ndj8yGVxJXvXarr6k0p0VQi6X/kQwbCFMDzsnX0LhpCceuMDgBJubSuPxyUPTqS8up4Tr/wLHl2wdLPNyP5GfFXoN+CVMUnsrZN0StM44h/1FGQKHCl45OMIQ3oZZPk17j7Zy73/CrOjyuVXXXXGvRRk7roIfgN0ofzUkjyC9imCGWss+uXo7Kx2KKyRXHOMh+wkgUdXFkOndjM4fkYD6T5BbVgSsaF3jsGvz+7LvxbXM/7+hW3apNoSdF1vFJ30pRBd+schDDNurC1dW2nqVO6Jz9glDGsT+CkgNuUYw56dW5Bo7J13W7zPy7Uap641fyrtJ0xj7+xJ5F/yQJS/JsXdHqBt1dK
f/v14enRqx5+fXcre9f9k4fowZ3TXm/PX6CQKayVd0gSDnmpgRB8Djy74ZI/DU59ZJEd7vO79IMpfXXQueTnI7C8j+AxBll/QJUPD7xF0SBE88WmEIT0NcpME01eHKQvAFYNMcpIEqLZdBuXpisO8gpqwpF2yxhPDU0jPymFtbT0//8UvyD3/j20mNdESWuSwKIRuKq6Kc5jbjL/QDLQmGbIEEsHXTwL+9GxArTZ004O0I/i9JsGwjXRt7IYatjw6ockZAssOx9Xy2yLjBUpkdcKfZ7FjTzmhYICCTMHeOsltv/SQ6dO4eIDJ65ttfjmzgbCjem+DtuqhmD7Ey6SVIZ5ZEyEvWdA1Q+P8vibLttjc9gsPw/sYvP6NRYOlypQvfWWR5Vfm2mUByR2/8jBpZQiAxRstnvk8QkOk0Z834kgCUR82rw5zLkohbNcxvJc4JGn6WAagqryUPTu3NNkjELqB0Ez09Ny4qGqMx/S0dqDp6p/rYGR2wK4pQU/LxbYtindtJ79zQZteewIJtCZ8OZ0Qmk5SfgHB0kI004u0I/H7cqR0O3ZtOXueujq6RWBLhzX3jSKjXX6b+QMGAw1cfd9cvIbB7qJiTsxTBtfXHOOhILORv34+s4GIo+KIkC1ZtMnC0ATTh/i47nXVn/XkZxHO7GHw+mabO0/2MmqAyTOfR5C2ZP5IPy+ss8jyCU7qahC0Lf6922HmCD9XvhbEo8NLX9k896XVIocBHJGrk+4TYAUZ1lNj1cZtnHN+m3wsQCN/Ac04TI9pekU5jFhAFZWVAMVhQtPiw1+x4SIhRILDmiARfB3GMHRBsGw3Vn0VjuuS5liES3coW4roN9RpqMHbrjNdL/lLs3O3PzahzbJdMcxZ9hHbtn+LV1Oj2Ee31zE6CnL8GraEdsmCa472UB2SDO1lMP6VIKYGI/oYdM/QOL27zrItNu2SNZ5fb3FuT4MJrwZ57ssIlgOGgB7ZSjX6o11K7f7t7TZXDfbQMU3j7J4GjgtXH+2hKiS5cUWIfwzx8crXFos3WaR4wNSUWvUxT9agGwFSM3MOSZkxlgHYOGMKFUsbZSLCtWUIoUebj6MQQmm0uQ52TYnKEDg2wvCgmbHAWQVtjnNgd4MEEjjcEMuWSNchULwdpCRSuj3aOqGasEsX3Y0npwsdL7kvfl64vJDwqkcPKuPVUFfDnu3fHHD/p++uoGFXEZk+SbZH8sY2h+uO8WBqYLvQLklw3bEeKgKSYb0MLns1iCvBo8N5fQwyvIKsJA2fIWiISKqDkle/tpm/PoLlKs2ujmmC+ettFm5UWbDVexxO7qpkeFK9gnN7NXKYEHDDshD92qkM2D+/dchN1the5fJpkcMxT9Wh6w3kZqbS4LStvVDTDGZTDrPqlbqKlDRymBCNQxFRDpOui9CN5vxleBIc1gSJ4OswRihs4UqwHTe6RcQbVQVEsyYSNxxkx8zmgZZtNW9OPZBq/o+1HSqvrufltz8h2y/57Ykebn87zNoSlz21LvM3qOVasgkpHqXcfMUgk2G9Dd7cbnNhP5PO6eq9ZPk1nhzq47plIS7oazCqv1J4PiJXZ2ulw9STVK9X2AGPIUkyNa45xiBkC8YdYfLbVWECliTbLxh7hFqpXtDX5LWvldXRhpuymfVFiKfWe7jpqTcPeblxX1HBz+8fjZmShS+nE5FwCARohkeVYjRd6X8ZXkoX34WekgWoSUfp2AgBxQvuIJyTC9BmGYEEEmgNFO/arv7GY9WpmDJHvFol4w/cSEBNAUchNIM0b6OubdNMTFMcyKYmUF/H63+/mVvPO0JNOe+DmvogK8u3kJ8s+d3PTSauCOEzYOFGi2e/UFn0ZE+Uv/yCXx9rclE/k9e/ieBINcjzxjabEb0N3i90eHqYj18vCzG0l8ELGyzSvNApTePo9hqzvrRwpNIE+1lHjTVFLg+f40MXKui6flmI2rBkcHudYb0N9ta5vL7ZwtQFv+pmcH5/nZkbNP41YyrZ6ckA3Lvw3z/49/Fj0ZTDYsr09SEbX47y69QMNZUa4zBPThekY1P22l9VsBXjL00H6cQ57H+dvxLB12EMF4GR1QEjLQdh+qhY+SgQE7IDIyUL6Ti0v3z/qZFd08c3e9xUNb8pWjL5/j6Ys+wj2pkhft7VoGeWCoQ0XXBiR50rXgth6pKgJSlrkJQFJKfPCWBoMKKvQX6KoLRB8u89Dqd3N3AkDM7XGLM4SMRRshH3nuZl4ooQXdM1Rg0wmfWlhSYEoweYZPs1wg5Uh4Q676Ugjqsa+PNSBJcfZdItQ6MqBCc+XUVmkkGKcP6/TAXFsHHGFJxQANd1cIUe18txGqrY/fhl0SkhHc2fihOoxkzPp9Pljb+volmTGDjxiTbtf0kggdaE4zjR8noeQJy/kOA0RDMojkP+2Psxczo3O7do5kTw+uOP9+0li+FANjWuY9O+YxcWbwq1uH/9urXU1AcZNcjkmI4mF/RTgwH3nJHE6l1hxiwOkOzRKA8o/jppZoBMv6A8qPqzclMEH+5y2FHtcl5vxWGD8jVe/dpG18BnCB49V/kxnt1TZ+VWm2y/hu3AkN4GvbOVtIWUqrfsohcD5KcoB4+8FBHnr6c+C1OQbeJFMHvph0y+5Kwf/HtoLWycMYVQeSkAemo7CmfcEOcvoBmHCSHIO/+2uDRFjL+gbXv4fkpIBF+HEXJSvc2CodLSarSkDDR/Grkj70QzPRTPn0pTYW2hGzjRLJdTWxpXg5YSvOfchW1FVIZMQHhLEaahavSGLujXJfdHXWcs66WHwwzp6SXLL7nsKINLXg5xSheN8/rovL5dJy/Hz57yOmzbZvQAg01lLi9tsqMq0Mrn7IK+JrarJnw+36smNH/W0SA7SXDpQJMFGyxG9jOZt97C1JSJ7cKNFgKoi0gsR8laDMjVmL/eQhMaZ85twKsLHjnHxy1v2dTpmei63mZTQS2tyqvKS5tZAjmhAB0mPELhjBviQZUVCWNXl+DP7cKuZyeSO+JWHFvZDGWdfQPB0kKEpsVXmAkkcLgj5gkI6jsgpWqqN7M7kT/ufgBlL9SgjleGzBauFcauKSWeCQvWUhGo5tdnD0JoOlKAsfUrAHTDQNf17+wbSknP5ORr/9TivrrqSj55fxQ+XXJuT1U9uOIogxtXhCistmmfIhhzhIelO3SkZmPbNmMGGFx1tIez5jbw5jabBRssJMpr9oK+Jk4TDnMcl+M7GRzfUWdEH4PtVS4+Q3Bagc4nux3e3enw/Forzl8xX8fiepf8FI0NJQ4eQ/HXpJUhQsKPz2vw3prNbRZ87cthVeWl+1kCOaEA+WPuoWLpQ3TYh8PMjDyK5kwmZ9gUzKyO7J0zGce2CZXvTvDXAZAIvg4j7Fv+855zFznn3Yonp3M8wJKRALmj76FixTRkdHqo5IXb4h5amj+N/DH3oCdnUHDNo+yYOZnsoZOpWPYQZtTnESBYtvtHX2cs63VSNxOPDpUBiRtd+Y19SZUIB/TswKrHJ1NeXc/5tzzEqP4BPtrlcHR7yfiBJpNWBjmhk0F+qiDiQPcMjS
E9DZ77MsLdvVTJYcJRBhctCnFuL4NTuhp8tMumXbKgKqi815T/o8bMET4KqyXv7XRYPi6JG1YE6Zau0TNLY9SgdHb3ubRNM14trcr37NzSrNcrBs30xW02bNuK67RJx2ZvVMtNOioAE5qO01CNJzUL3ZfUZtefQAKthaYlwKkThlEfsskcMhk9vXGhJ8NB9LR2SCuMG6ylfOlDCN1AOrbir7H3oidnkjPs/yhf9iA5Q6dQvuxBPLnd44bN4f2MuL8/Pl2xkE6eWtLaa2QlaTREHDQBg/N1TnmujvapGqUNkgE9O7DgL9dy0ZRHGHdUkBVbIlx2lIfxA02e/SLCuhKHYzoY5CQrX9kYhz35eYQrBnvQBFw60ODCF0OcVWAwd61Fmlf1oOYkKf7qmaXxzHAfpfXKZmjZuCQmrQzSv51OQabGuCO9fCXb88oDP7w15IegpSlVb06XFi2BdF8SRbMm4Tg2UspGrUnXpmThH6JHScqXPogbrEvw1wGQCL5+YrBryylf8kCjB6BjY2R1JCb+VTx3CpGK3TgN1Wx/9hachirKosdHqktwNI2Udh0O6hreW7OZTwtDrC50+fuHjX0bQsAx7Q0GtddZb6tGyznLPuLE3CBCwPTVEVZd5seVSv/l+XUWL26yCFlKIqLBkqSYgpxkQWXUZHtwvsboRQEyfIJkj2DBSD/LNtus2m5z2VEm5/YyqAur5v6xA0yeXhOhKgi/PdHE1GB4d4tJh8DDsXjX9maNpKVLH8GuKeOzv4xE6Dqu67Bj2jiEZuBJzYofJzQdI60d2efeHC+97J09iXbDf4trRyhbfBcpPgOIsH3Gjf/zfRIJ/LRRPP/2eMkxd/Q98Yne+P65U4iUF+I0VFG84HaEEFED7loi0YDL0fQWXc6/L7Z88SGF3zYQsSze3WERlThGCOiQpvPl9Rn88Z0G1tte5iz7iJM7RrBsl+mfRnh7vB9HqizXsBcCVAVtFqy38BmQ7IlymEegC9hQqnp1j22vypEFmRrTzvWxbIvNv3cpP9sL+xmEHchOVq0bz34RoSIIw3sbZPgEZxZovLRiJxU1DfF+r7ZATUV5s6lsx7bZNWsydo2yRwNwXYeieb9D6CaelEyklNGpRw09LZf24/8OKP7qfMU0AsXbKFt8dzP+gkSvagyJ4OunhOiESf74Bymedyue7M5EKnYhDCU/IXQToRmYWSrNm3fJA/Ex3+J5t2JmdcKq/PEZrxiWPDgx/vN5U6ZTuLeU2rp65l7gI9OvURV0WbhEEcZbq7/ii68DPPuZpFu6YOSLQaYP8TPtHB/PfmExZ22ELukau2tdXAkNEThzbgBDqHR8VVAScaA2DFUhyQULA6R4lO1HZdDimTUR6iOQ6oVkU2XRxh9l0iVDoyIgyUnzMrwXbd7v5ThOvL8BlCVK3ug/xx4BUL58mkrLZ+THy8Mx5Xrp2kTKCxFCNQf7cjoRLi8kMyc30R+RwH8FpJTIsMrcly99MM5HQjeRjtWMv7TkTNyGKnJH39OMvwDsyj2I/Xvovzeue+B5ANa+tYhbjmzglocXxjns8aE+NpVZnN3TYOGSnYTCNhu3NvDYvyy6ZwguivJXTpJgZH+TBeuVQPTuWpd6C1wpMTQ4+/kAejRCtFywHMmmMpfzFwTI9AnKApJdtRaz1kYI2UrGIsvfyF8d0jQqA5JMv+C6n2e1eb+XK91m/AVQbkfIGx0r3SoztxiHGdE+vqbK9U35C0A3zAR/fQcSwddhDI8mKZl/e9wzuxGyyf8Cq2KXkifQdJyGSornTAbXjnujAWjeZIrnTMZpqKQ2NROrvgotJ+2gNcCWPDiRh+a9CXs+5xf90+PbryuvYfbSDznzuH6cmFPP85+WcUwHjc+KXEa+GMBrKKPYFI/g7lO8THojRKdUgc8UfFXmkpWk0vO2K0nzCR4+28eEV4MIJGUNKtPlSNXYWhN2STKFmgasd3n0E4vHPnUQmk5qpgkcehV7kKDpGOl5Sj4CpVIPIAwPuK6SlwB1XHT1b1cVHcJrTCCBtkNKSirVZVsomf97mtGX08QsWwi1IGzKX3Mno3n8uE2shzRvMsVzJyOlxG2oRggOemJOGAbz/7mJv00azbL3v2iRw+jYjxOP7MGsV1bF+WvUiwHMKH95DcGfTvUycXmIjqkCv6lRGZSUB9TgUEXAJckjeHKYn5tWhHClJOKqMqWUkGQo4dVdtS6OlJTUS6Z9HOHRTyxMUyc3MxUI06Gk7fq9WoIdCaM4TIUIZnYnrIrdjRwWDZjjKqoxrkvw1/dGIvg6jFG3/O79tnnPuYtknwdNCIQQGOm5aKaHSPkujPQ89JQs8sc/xJ4nJjQ7L3/sX0BK9j53E6fe+ECrqt6/t2YzRaVhXlhf2mx7h5LNAGzYVssZXQ02ljk8dLaPK14L8spoPws32qR7BT2zNK49xkPQkvz+l14eXR0BqYxo85J1ftXVoGeWxpBeBku+sblysMFVgz1ICVlJghejDfiTf+7lj++EeGa9xq1z/nXIZSVahiKnihWPYteVg2MTKWlurOEGqqOr/oNYzieQwGGGliQgpk4Yhm6aCCEQmoaRlgfIeIZeT86i/eWP4Fphip6+Jn5e3pi/IITAtcKUz/8dKT7joDMqmqaz7IsiahoifPPNgTmssLiCMwoa+euG5SFSPHBmgUmKV3BGgcGF/VT/68TjPLRLEjy6OsKC9REG5Tfy1wV9Deavt6gLu4wbaHLVYA9ZfkG7JMHjn0ZFZwW8t9NmS62PD2fe0aalxu+C3OdxyYI7cIN1yvrJsbHKdjYeKZSXY6OmVwLfB4ng6ycGTVPCq9Kx4034rhWJi9s59ZUUz/st0nWJlO8Cx477bQE4gWo2PjOlVVXvm5Yh90V5dT0X3zqNJG8Nw3oLftZR5/y+JjO/sFhb4jLvQh8SwQkddS5/NciTn0XQhCBkS5JM8BmS4X2UXdEVg5QC/uJNFi9t0XEijbplliN56OPo5+G2bZlx38mgmopy7Bd+jwA8UTNsp6FamQTXllCxfBpOXZkKrvTGr5wQGlK6SMdR2UpQq//6KopmTcKqr6Rztx5t8h4SSOD/F3RdRzp2VJgzepuP8VdDJUUzJyIMr7JU24e/cG2s+kpScg7+exEJBRGajqt5+P2VwxlYkE+yvzkvNvKXxrDeguM76ZzVw2BDqc0HhQ4LRvoQwIRBJufNDzB7bQSByooZArxN+Gtkf5N3dzqUB1yWbIGXvwrguo2BjuVIfKbAcSEnOdxiqdGyHU6ZPJPUJA9Dz/1xi7WWprOl47Dz0Uvi/BUfBhKCimWP4NSVoSVnNuOwGH8Z6XlIx2HvrEkJ/voBSARfhykOJIoKcGT3PGpTUtjz5JVxR/mYmzxCw65WZa5YXR5Aiy6rdE1rMePV2iKsMcxZ9hGndIb3NkvmjDDRhCKq4S8EOKenTmUQDE3SIVWpST/xmc07T/2eEVMeoU9qmOM6anh06Jqu4bjQJ1vj2xoJDrx8aTZ/eLOasgaX8gC46AgBptl2shLw3dONMS2bz+67GDO7E5HyQqRrN/a5Z
HdGCIGUUpWLUX182UOUwGTJwj+Aa5PiM0jJ6dFi9iCBBA53HEgUtaainILOBVSkpFP2wm2Ea0qVlEScvwRCM7BritUwSlZMpkCiCYFTW0JmTm6L34sfKsR61OkXMfD0C9k+/w9MfPIdpl58PBee1L/ZMfvylxBC8dd8i3N6aJQ1QFmDi6nBqAFKNmfFNlWW65cR2Ye/XIb0Mpi91sJyYN3/9aCqqpKrXqlhS4WLz9RBgK5DrSValJb4YvNuBp13NY4U+JJ+XFasJf7y7sNfe3ZuYe/8qRhpeUg7TO7oe/DkdKFo1s2Y2WowKM5f0ax99pCbE/z1A5AIvg5THEgUde+9V7LxmSkEK+sAQe7oPwNC+WilqXHuPU9ehXQsiufuL6Cqy5atHVpbhDWG99ZsZsO2Wkb0hFpLo9YCj6lzYX8Pi792+WdhmDRf4+xSTorObdMX4SXCJ3tsPt4NM9ZESPUK6sOSypAkw6/Tr2MSvXv1YGEvdd70D8rZ3P6CVgu4vovIYf/pRse2CNeW8cVfx5KenYNrRyiaORE3WKsOcB00j5/i2bc0nhMTmnRdPLndAdCTM8hI8SeaVBP4SeNAoqhr7hvF9hk34gcqa8tQHHZPM/6CKIdJSfHc5gtFGao7YDblhwqx6oa6/X29u4r8rj2Y/8FmPtxa2eyYV1eupqKyir5ZLn/+QFmo+Q04vqPB0u2ClTttcGySou21OckGJ3YxeXNzgA9qXd7/Fh5bHSbJFAQtNTyU6tXplGmwqSTC5nIvv+qdjbcwhJXehWMGNg/+pjz3r2aPSyuqyTqhL0un383AX5yB6Wm5gnGw/FVVXoq01T3EaagG18G1ws04LMZfenJWnMMS/PX9cVDBlxAiC1gIdAN2AhdLKataOM4B1kcfFkopzzuY1/1fRvt2mWx/fjIFlz5EYVktRnp+i8cJw4OmG7iOg9NQGV9dugi859yFpgmO7JoTz2rtLaui9LFb93sep26/X+cPwpIHJ3LelOl8UFLOB8ub7jEwzRBpaT6ab4UvvtlFTloKeaZLZXUtM0f4yPQLqoNw2WshygKShm8bOOnx5pObrdlU/11EXlNRjhEOx02xpWsTs35ypRovFwjcUD0g0PypoOnkjrpbNdsDSBkvNUqkKh0ncMiR4LBDi4x2+fEb82+GHYd0nGYZ+qYwUrMBsGvLGrNjwK6d2/jNsOMwNMGjSz4BVLAREwZtCt2XhH/fJ94H5/3+qQPu+914eOrWS6kuLeTdMsmK3ZF4mTC1vQoWg9WlBKLbCsPwzTc12K5OUmo6dn01v/25h1SvoC4MD3xkURMRVBXbXPJyAF0T+LwewCRFpNJ3/H0tXUYcfaP/H3HCad953MHyF6hJRsMwcAA0HaGbjRzWhL/yL3+YvbMnJTjsB+JgM1+/A96WUt4vhPhd9PFtLRwXlFIOOsjXSuAAaDrVKF1FDXkX343QDKTrUL70QfLHP4hduSea1teRrsP6l++Kn+cKnW5X7G9TtP2xCQd9fd/VE/ZdiE1RnjKwcQLp5uoaFu1ux5UPvXLQ1/VjUF1WjCPVJKOM0bCmI4SG0DTajbwL3TDIt21KF98VHwb6T4j6pCPE9zwhgdZCgsP+P2PfRm3XCqvv0nDVu1q25K/N+AuU92PJ/N/Hz6mvryN/zD37ySUUzZoEvoO7zcWkKX4o3pn/BL33vsL4k3Li22r8rZuh/zFwbGs//oohxl/tbIvSF+8kxWcQronu/A/cJESCv34IDjb4GgGcEv15NvAeLRNXAocYFSsfB+lg15YD+0sYCN3AdfedaTm8cKApyga7mOqKUjKyf5w90sFACg3dn4qRkd9M/8OuKkK6LiUL/4CRloNdW6Z2CAEN1ZQveUA9NLxkn3MDSNT0I2qMO1Y+0PVEJ8AhRoLDDkNI16X01fvixszN+CsaOMj9ZvIOL2z54kO+KA2xcF3bZeh/DKRQnCMde18NI0oX3akGHdSR1FQojmrGX2erakmMv4rn3ILm8WN6vAn++gE42E8qT0rDFRhRAAAgAElEQVS5F0BKuVcIcaC7oU8I8RlgA/dLKV890BMKIa4FrgV46tbRXDviFwd5if/NEM0ngZpA2mH+H3v3Hl9XWeb9/3OttXeStmmbJmmT9AyKjqKOow7qz3l8ZgZHBSuMPjiAnCoDPINWOXQG5KwOgjAvKkgRLTxKC7UUUEcGUMQZHWZ0HMUTioyCPebcnTRN0ibZe611/f64117ZObRNm8NOd67369VXk73X3uvebfPtte51HxZfeDctD7j782WLVqKRomGO9sc+5aYNhxHl7/0UAEEYsf3BaznuvFum8gMc0sF6zP79+R184/kfUvUXH5jiFjleuoLWTVcifiqZqhTu7yRVVQdhSMMFd9LywGXUnHoZZdVLyXa1kT+wfev1tG290b0oCkhV1RP19ybbeLhtOKz7fgpNaIYV5te5a2/mnaedPRltLinZ4VsFxdum1Z+3Di9dRuM957utheK18dof+xQ60AcIH111kntJFJJ64k6WrR65pVexHG2P2aQLQ1o3XoFG4WCGCYCy5KMbad3k/s+oOeUT+Cm3b2VhfmWeXEe4fy8ahqSr3C3jfIZZfo3dYYsvEfkeMNrAouuO4DzLVbVZRI4H/k1Efq2qfxjtQFXdAGwA4Ed3T+9Lm0k0fJPtwsfzUvNqaX/0JvzKwe1qNAzQMCDq66blgSuSQZHZzG5EPLen2kCf68bf28z8hhUA7GvZSeaJO+jb00guCJNryjCMOP7cdcm5xzPzcaK847XL+PIPfghFKr4Wvu8ywkgpq12eXCW2brqSmlMuc/uZ5QbHhaTKyonmL8JLlxPlBpK/q4YL7qRl01p8InL9PVTW5v/Pz9r2GxNsKjOsML/ue3bbjM2vwg22hz8+XMeTQwumoDsDUUDr5n9AkHjZnF1D8qth9Z1k27Yxu8ENvu9r38Wex29jILOLMBhcxHWgew9Bj3Dd6lUHnfU403jpMurP/SdyXa2ka5bGxZfQ8tWPuz2CVYeMlkhX1bkN0tNleLPmsfjC9bRsvBxJVxD195JKpQsyzPJrrA5bfKnquw72nIi0iUhDfMXYALSPdpyqNse/bxORHwB/AoxafBnncEVO7dxyWjozoBE1p1yWPC4obY9+mkVn3ESqegnBvjY6nlhHumZZMjV4NBLvl/b64+r49fY2Zi10U7y759dw4sW388P7bqJpe3NSiBW2Y6oLslTKZ146JJcdOOhsn4MZ63T0g/3n4RVsn6EFg1MTUUiwr9VdvStkB/rdwzm3jIeGAeGBLlo2Xk7Qk2H+ogabkj3JLMOm3lj+Pac8IRsxJL8A2h/9FIvO/AypBUtw+9Veedj8yluy8oRkU2iA8nkLecOae3nh/rXs3fEHrlu9asjxx1pBNt78qqycy95M+2BxNezyQMMcRO7iW1UJgxwS35rMZ1i2fTthbyferHloXHRZhh258d52fBy4APhc/Pu3hh8gIguAA6o6ICK1wDuA28d53hkvX/CcdOk9ZH4wdIkIX5SOp92YLw0Dov4emr54geuliQLw
UrRtuRYN+sng4fl+PCuyi+/dciGR+KQr3erwZRVurlC2v4+Gs27mxOOGzkwa71IUR+sdr6zm5d07WPqKVx/R6w43Hf1w4Xbd6lX4vk8YDKBBLnleo8DtaxZvsxH1jXyPwYPBF0FFbEp28VmGFckXHv9v9/P2gy8OeVziNQnbH70JzfUT9RXkFwBKy6a1aNAPeIjvo3F+/fzWD6HikY57mN1tMAj7D1B/1s0sWXnCkHMdbBmK6Wosy2kcLsPWvP9thN1trsgKcwVHCOKnwfPdFk4IB58xJMmyH1Z0HZ3xFl+fAx4Rkb8FdgEfAhCRtwB/p6oXAa8BviwiEeDhxkv8dpznNYcR9mRY+tGvEgW5ZOPtYF8bmSdcoaZRQMPqu9Ewh5dKA5Dds4OOxz5FkMsS9XZRXllFtr+P7999Ff09e8kVdOcX21++cQX/+p2njrj4OpzDhVtl5Vx6n/68u3qscEtIEIV4FZWkqpcke5+5CiuFpMriLWkLCPEihCdgis4ybBrqfPoeNMxRf8Hnk4ucYF8b6eqlbuu0KEzyS+L8yu3ZwZ7HPk2Uy5Lt3UtZ5QLC/gM8v/5Ssj2dbpbfDHC4DFuy4jh6v/eFwQwD8Hy3JI5InGGa3DnxUmUjpjaIYIXXOI2r+FLVDuDkUR5/Drgo/vpHwOvHcx5zcKMtjhptb6Nxy/UQX9W4bTzyT8YL6xUO0k+mB4vr8QpCak+7inl1S9m5+TqigT73PsCvt8er5/vCa5ZP/WzDvCULqwg7npuSc71w/1oOtDQmg3sh3tIp7HLLdgRZwv1dNK4/D4DmDRcT5bK0bbl2xH6NGuTwUyP3pTvS1bnNxLAMK66D7RbR/q3bUdWhm3BD0lNTONNRkqEAQrqymiDIsfC0q5m1aDlNm68hyh4gf/nTtOMlwG1xVL/s+Mn5UNPMC/evpa99JwqjZNhexE8TZfsRP5VkmIYBzRsuHtx5IKZBjj2PfWpE4WX5deRsXmgJm1Xurgj7BnJE6jqQPT8F6QqCvc3kOhtduOWLL43ojxdV1Siku62R4EAPDeevo3XzVaQWLGFWRfyeexpHO+WUWragjO7ODPOqaw9/8DiE/Qfw59aw/KJ7hjx+oHUbmSfuQPwUNe+7krJ4nEmUyxLsayXz+O00nL9uyLi05gcup3KUdYeOdHVuY0pZfsmCdFk5uexAsiKCl3Z7Pkb7946aX9meTkDRKKCvfRdhX/eQ/Cord4s6DwyfYVnCXH7VUn/O7UOyaCCzi+avXcPKNRvZcfd5LP3YpuS5bGYXqfn1tG3+B5Zd+IXk8Xx+DS+oLL+OnBVfJebFXe3kggCNIva17ASg4zvr0WAAEKK+faTi8RCd3/0i1e/6v8mMx+62RlKV1agqqQWL8cQNxFfVEevBTAcffOtK/t9PnuFN753Y6fwv3L+WsP9A8n22pxNv1lwaH7oazfUnj+dnlebHRQwOwJ9+f1bGHAtad29LtroBd4EDSsd37kGDLOL7SU9MxxN3gJdKlpfoa9+FX7kA4vyS/ILS0zS/JkvXntYhK/3n86vtkRtH5BcKjRvduN1RJxCZSWPFV4kJQmVew0pa43VbOr5zD2HPHrd/Ggy5UnTjv5TuNteLpVFI0NsJInjies6Cng7aNl9F0NNB84aLk/NoFNLi+3gaFm0pirf80XLu+sFPjug1+VlAXXta0YKZi554XLd6FV17WknNrWXx6juT53Z/9TI3BTvbR8Pqu5LHo9wA4b522rZej+f5BB1NQH67DifX2USUGvwxy/V2Ulk7+t50xsx0YRgyu/4VbneOMEguHKO+7sEMi7fDSVUvoXXTlfS1u14sjQLC3k4QDxEhXVY+Sn5Jcmyrn0I0GjIDcrrfJjtcflVWzkXFG5lfQJTtY3FBfmmQJdfZRNd3vgDiJfnlDg5wPYjhkF5Cy6+JY8VXqQoD2h/9FADerHmD+6eJgCpBZyPh/i4yT6xztyKBKAxIVS8l6GwkUne7UjyP+vPdtkMa5gj2tQNK25ZrYdY8fN+nvT+eFdnTO+Ufc07UfdDnDre57Oz3XDFkc1mA6GvX0t/ZzK77B4vIZHPsfHAH2eRCWqMcGoVEQRZQ/FQaP94uRft7CH7wRQpHrdggVWPGIP4By184Zh6/fcgekEFnE0FHI1FfN5kn40lEBfmlquSyAyPyCyDsbqf1a9cgFXMREXr73U+oXzEbRsmLYjma/Nr98PWE2f4x5pcCmvQyapTDjycvRJGgPe1ofw8D3xu87Wj5NXGs+DrG5RdjbensIYqUMIrwZ1eB5+PPWYBb+UvdFOKYhrn46hCW/5+rCUIXdNkgpKx6MbvvvZDWB9fi+alkVWkg7jFT0tVL8edU0XDeHcm4st1fvQIqmHJvWFZF48u/ZekrXwsMDay9mXbqz3JXy4UDbPPjEMIwpLy2cFAu7s/FT7PwtKsRz6OidikHWv8ACG1bbxiyGrfbzzGNeB7p6iXkOpuS9wV3NWpLSRhzcIXrUe3ryBCEOfzZVWgUkJpbC4jbyive2Bny+eUKCX/2fBa93902C4OAdPUSGu/9CK0PrkVGyS/xU6gq/pwqFl+4Hg2yyTioidgHcrzGm1+qHHF+peYscLdmC5aV8FIpfN9nQe0iy7BJYsXXMS5/m+/4c9dx4sV38P27r2LZRz7P9q9cSf7iRkTchqcyuPF2Xr7wCoJ4Z3sgNWcBNavWIp5P29YbCDqb3DYUULA2zPTYQPWv33YC1337O0nxVTjw8/n1lyaLLR5qgG2UPUDDBa6bPpfZzZ7HbyNdu4xcJr+oo7gxJJ434uq74+l7iHJZGu+90B2Zn0UqgqdDr0oP5UhWBDemVBT2oly3ehW9/QGLV9+Z3CoDV0wIgnhewVpfjuYXAC1YBsefs4DaVWvd5ttbrx/cFzL/sxmN/edyqk11fiEk+9C2bb0BBEQG/5yWv+JVY2q35deRs+JrhhLPB/GSlez3tezET6XJdrUR9nW7WXyej8ZBla5ZiogwEP9Ai++T7Wonv5FOf89ednW7vSLDXJblDSNnIE7GmLC/vvEhfrujjSe/+2+Au1ps2vESvj9yv8ujJV4KL76VWFZeQXag333vp0BD6s+9nbLa5WTbtzO73l2dHmxW48FYV74xYyWI5362xE9RXrucA63b8FJlBF1t7lZkQX7le7vK4s2kgy63XE6uoxHViPzqX9meDjq6I/7uPX9MqmwW82uGZthkjAcbfmtxKvIrmT3qpyirXY6kylj6sU1EuSzhvjZmLXIF387154/581p+HTkrvkpMWcUsdn/1CoLevXiz5rnF88pm07LxCtfzpQpRSNTXjUYhfQP56FGi/FWlCAtPu4qOZ74EPR0gQraj0fV1xcWYu+J0tyABvDkLqD31cuY3rOClu84fsfYYjL4a/kmX3kOmZ2DE42Mt1DI9A9S9+a+oPfkSRITnbjmD9n+5I56J2JNcQYuXYsnH1h/6zTRK1g+KcgNolGP3A1e6maKej0YRO+6O18EpWP9GxDvoWxpjxs6vmE3zA5cT9u51i356Pqgi5bNo2Xh5QX71AG5WYy4
7gBscnu8VExaedjUdz9wLPR3Ja7Id8fI4SYZFSX7BYIbltzca62r441njavgSDePKLziC/AoRz4/3mp0edzFmGiu+Ssw7Lv40AD+87ya62xvdD7E/+NesCp7vM2/hYg4M5GjdfDUAQW9nssyExmOWNHuAVFVdfBWZAiHe02sukq4Yuj1FFKLqlrdQhac/P1hoeely6t93GW2dIwNqtEViAb532yUj9pGE0Yuyhce9hq7/+RFVr3kHiE/DBXcS5Qbcitg1LlxbvvqJIVeUlZVz2bX5mjiUQrLt2wG3LEfU103b165OlpJYdObNiHioRqTm1wNK2+ark6BLLVic3P4wxhy9Ey9yWZAsDBqGNN5z/mB9oK63a9bCpeQGBtBcP62bryLs7Uw2rc/PNs7nV8eT69AwiHvLlKCnw91+G769ThS6Wc1RSNs/30Zb/LCXrmDh+y5jb6adW9ecPaY1rlp3b2P35muOfC/JOL80yA5ufM1gfoEbG7dkxXFHlV/pqnpUldYH14LnU/fh22jdfFVyMa22TM6UseKrROWLsBfuW8u2h65MHs+PDQOGbKD9+zvPS44Rz2PP4/9E1LePRWfcNGQF/I7vrCfY10Zqjtt7LR94XkUlIh5li1biz6mi9rSrkvdr33o9uzdfC+iIgqplz15OLPj+xV3tBKGSC0J2tXe5tWjiwk/EY1d7xKxTP83rV9QmRVj1ij+i+V+/4YqvAiIeufhqN9zfRccT68j1diYzdta8/20sOvMztP3zbaQWLAZccC8687OAu1rOPHEHqfl1btyIn8JLl7mryvh5YNj+aMaY8coXYeB6nPKDvq9bvSopdAo30N5+14eT48Xz2fP47XF+fQrx02iU/xkV2rZcS9DdRvujN+HNWYCIoKpIupxU9ZJR86t58zUA7B62OfdoY5pad28jOzBAlMvS0dI4ONZMoKO9hTXvfxtLVhx32F4xERmRX+Ay6mjyS/y0u92Yz/MoJNgX7/EYZAty3kwFK75KRH7W42iPj+bFXe1kg5BcW36leqHmfVeCeKARZbXLaN54BYjH/IYV9O1p5PXH1cEJt/DdWy/m3VfcwXc/v5bFF7hp3GEuOziwFSirXUYU5EDV3f4Eat93JVE6NWRrol2f/dtkyyKAbC4Hftq1A9zKzOe5IPbTZfHaNM38/JEb3PiyKKJp/bUEvR20vfTrwatA1RFh8oY197Lt/o8noTe/ppYlK09gT7piyJWi23zcd0Vf4TZMYeBmC0Uh0f69pKqXovlZkvF+aPmBsbYejjFH5kgHbYdBLlnjK8kv9wRldcfTsvFyEEnGMA1kdrFk5QkEDe6Cs7c/YOGHb0NSZcmuFHnpmmXuompYfiFCWXl5MvPwF7edTaRR0isFEOZybjyo5+PPqUryC+KCqrOJXY/cyEdXnUQUhXTdeRHgetgGe7HyPVCDGfaGNfcCg7c/h+cXQLR/b5JfErchv7yQakSwtwk8n3B/F+na5fGuHPkdAoAoSDLMs+EUk8qKrxIx1oHs+SKtKdMNFfOSWYzi+aRrlrpxEVHIQPt21+sUL8KqUcivt7eR8ocWNFGQG7zlpur2C4sfR9VtLJ2/7RnfLhi+NVG+9w0g27KTstplyRUpEPc2De5PqYA/uwpJV+Dl+lFAUuXkklmZblProZTnPncWgiZXrnsz7bxw/1oWvu8yymuX09e+y60Z5PmU1S4nyhWMRYuLsLLa5cnVqGYPEHRnaPrS37oJDFFIdV0DYOvhGHOkxvLzUligdbS3xMtRDOYXQK59B9n2bUl+FS7C2rp725D3U1U059boG/J4OHp+pavqCbsHLxYjjUhXVic9cOBW5U9XL0U8D/EHB7sX5ok3ez6L/voa9jyxLt59BHJdcfHn+XEBVZhhynO3nhlvfO0ybHh+AW59rzi/8nv6eqkyVzTWLieb2eUG2Xs+mj1Ay8bLCffvHTXDlqw47rB/H+boWfE1w+SLtFnvuYFhk7ZdUZGsgO+25sBLka6qA4RZ5amkcPrhfTcR9vXQ9OWLBt8hHvCaqqob3M5D9ajHQ7lbjn6yPEaYFGD5QaX9LDz9k6hGhAf20ffyT+jf8QtAhgykBRfOi864CVQpq6igftnxNO14iY4n1rHnybvQKIinXAuZx29PXhf1ddP+6E1Uv/ujyWOpBYvxZs1j2UfcatHND1ye9KrZmjjGTJ7CAu3v3vPGgmc0uShy+eAl+ZWqcssrCCSFU9eeViLxh+3aEQHq1hWbgPxyL9chy2Pkx1Yl54uCJMPytwnT1UvJZXYOyTDxfBZfsiG5u1BWXk55GA7LrwzAiPxq3XIt1ScPfk4Ab9bcJL/AMqwYrPiagU669B5yKvj5fc9iHc/cS/XJFzO7PEUQKn7FbNq3Xk/F3AXJMbnevZR5yv5MM4s+5MaViee7OIlCOr59FzWnDK7Ro2GOsCeDeD6Zx28ng7tyy6R8Qi+NqvLSF1YPHh9F+HMW4FXMIervjfdnE9oevsntS6buCja/arMGWcrqjkN1KX0v/RhJl9O29Qb8uTXuTFFIfpuMvOzAwJAr4CjXz+IL19PywGXUrFqbhF5+Rf/2rde5Ff0Bf06V+8zpIqwoa4wZnF0Yz4TM63zmXurO+iy5PTvwUym88jm0b72BsrnVyTH5MZ/7OjI0nPkZwiA32MMUhWSeWBfnl1t0dHh+5XWk0kSeTxSFZLv3sOPu85PnNArx59YAEueXR+uWa4kG9seZ5PJrz+O3EfX3oholPe2SLqd105UEPZmkV0+jcDC/4uEU2YEBCi9CF1+4npaNl1PzvitHz6+tNyRLb/hzqpDU6MNRzNQZV/ElIh8CPgW8BjhJVZ87yHHvBe4CfOB+Vf3ceM5rxifTM8Civ/lHyhetTB7LZnaTeXId4MZj/Xp7GyvO+Sy7v3oFf/HxwdDJD+CfdeqnR76xKkF3hvat1yfLXAhuVelFf/OPyQJ/+X0jt933CVJzFnD8xV+gb8DdImjetJba0/6B1Pw6NyMnvlCMsn0svuBO922QJdjX5vamjG9NSn716hELKGo8y0nA80nNr0NECLvb8H2fXG8nURSSy+x2rxUh1xlfQYdBvKq2MLvObT4e9h8gu28P4vvsXO8CVzRi2/0ftwUFj0GWYcee/OzC1MsvUrbI3RrLZnbR8eS6eNkXlwlLzrk16dHJy4/5XPP+t418Y1XC/XsPm1/5fSN33f8x/NlVLL/oHgCC7AAKNN57IfXn3J7kl6qiAwdoOP/zIJLkV2p+Pc0bLhrShJpTLqPzmXthyMRwRTyf9sc+TfVf/R3pqnrA7Rs7JL/iuxZJfkX5hbNdfp140R28cP/aOMPakvwCy7BiGG/P12+ADwJfPtgB4pbLvQf4K6AR+KmIPK6qvx3nuc1E8jzC3k7aH7mBFxZV0ZLpJl25wO3ZOIooUsqqFxNpPKYAku11QAj3d7kVqYOsu7IMc25FZaDj6XvQYIBwfxeg/P5OFwIaBnjpsuRYSZUT7Guj8YsXoFFEds8O9/aqSKpgu6T44rfiuDex/7f/Tt2Z/0i6dkUSdF663E1Xj9
uWV7/seA7ULmJfR4Z4yS7SBStA5zoambVoeXLlHPa7wfUSb7sB038jXnNYlmElQMTlV8vGywl6MnieT7qy2u3XOIpII8prl7sFk1NlKCT5dKT5tf2uc+I3HRynletsTPKrKc6v3J4dyeB3l1+DvXaFdyB0oM9l2MKV7vu4WOt40s12lJSbeOSnUiw4VH51NuGnUkl+Pb/+0sE/rzjDLL+KZ1zFl6q+CCQDow/iJOBlVd0WH/swcDpgwVVEAsmATID0vIX4s+bhZXvY9tCVHH/uOrqYRba/j+/fPTjtOtfbzUmX3jPqG2ae+jzieXhzqtwyDJ5P2NOBW5xncOaMBgPUn3eHC7OCgfHu1p7QuuU6JJVCxCM1twYRIdi/l/JFK+PZjk1ubFp+XZ74c5TVvRKNB/oXLv9QOOai/dEbibJ9EIW0AeGBLlClefMnEc8fHPTqXsmB1m3kejvJ9XYecp81c2yyDDu25X/2U/MX4c2ax6L3r6V9643Mr6mljzLC/gNDio5cbye3rjl76HtAvNyEG/PlVy5IxptG+/cO5ofkN6YePb/at17vnlel/ZGb3HY+82qTHjVJlZGav4hcp1t/Mehsisd9DS71IOK5dcoKcm241q99MsmwfH61bLkuWRts8IMpURCMyC8YzDDLr+KZijFfS4DdBd83Am+dgvOaQ0in/GRT7MLHFsXdzrVzy2na3kxDwQ8sQMoXMk/fBoAbBy9EcUhoboBFZ9482D3vp8lldtK29QbKFq6MV9h3qzXnN8ktq13m3liJN7u9m913n8eseTUEKtSfcxuzytNs/8rgWmXplFuuQlLltD1y45D2Rbl+FI33LcufUNAoom3z1UgqzaIzb4YodO3wPKIgS8dTd6FoPPXaybZvT64ugRErXpsZwzJsGvJTqWRTbIBUKs2SlScwUFNLZeVc9u74w5CCA1zR0fu0Wx4nyLpZhhpkkz4oiRcehTi/OhvdWC8/7WYQqhvGIHFvf35WI4A/p5qaVVeSefx2or4e/MoFyYKprZuvIl2zNO6xSuP7Ptn+fjQM3J6Kwyg6uHRP3CsWdO+hfesNgCYZ5qXKiIIs6YUrafriBSPyy7P8mrYOW3yJyPeA+lGeuk5VvzWGc4x2SXnQ6SMicglwCcCXrzqTS05/x8EONeOQ8mXEkg+53r3ULlwIuFmRx5+7jhOPqxvx2hcAT0M3pqGA64YXEKH9kRvRbH+y2nS2I94T8jBrx+SXlBjo7SIKQ3bf+7cQBYifYqB9BwLMiicE1Jx6Oe1br0vGZwBoZzNhd4aK4/4kCS3x024AbDweLDW/jnBfe7L+z4HWbWgUIKlymr+yZvDzHOjC83yWrXzFqNuHmGPDVGZYYX6du/Zm3nna2aMdZiaA7/tDNpzO9XYm45auWb+F61avGrXg2IYb49S86Ury6/MB8SQe91fd/uhNSX65tf9CspldY95KTDUk6M6w++5zXbb4qWQV+rLycsIwRFJpvHTZkPzK97Slq5dR+E9M/LQrDM+6mT3fuo2ymqXkOpqoqF3q8ivIAmL5dQw5bPGlqu8a5zkagWUF3y8Fmg9yLKq6AdgAwI/utr0OJkHt3HIyT9/G8Bj54+MWjnm9sIaFC4jedRW5uEc+XbuMxnsucN3/fpoo20fD+Z9HwxxtW65NVmcO93cCxINCNcmX/F902L0HAH/OAnwg170nWWen47GbkvMHuazrxq9eyqIzbnJjxYCm+y6lf+cvmX3CW4ly/eRnLBGFcXHozu0XzFZ0PWA+tadenhRkMLg5dj7IzbFpKjOsML/ue3ab5dckONhirEeytl7VwnrK3/WJZFPuskXHub0jw8AtulqQX+2P3pRssZbkV8fuZCueIUtRxAs1i3j4c6sJujMuX4A9j30qOSzKZZFUmtSCxSz60KeTtcCymV20bbnWTR4KXUEFbuaiRhFtD1+PRgG5jqbBvWW9lCvkZs1NBv+D5dd0NxW3HX8KnCAixwFNwFnAhw/9EjOZxlpgHUrt3HJ+9fD1hHHu+JXVbvyCSDxJe7CzoOa9a5Lbji0brwBlcMue+DBJltWJ3KrQ57iu/5aHrqLh3Nuhu82tsB/77q0XoxXzqD/7s0MWYPUq5tDzq6cZaHrRBWlcuHmz5g4uKCtCRe3QdcCMOQTLsGlkIgaIV1bOZffD15OurGZg3x5S82rjfWvnA0Pza9GHPk2wr42y2uVu1Xx1txvdAPph4wVFktuP6ap6Wh/6B+rOuR3taR/SC/ezz52JVMxl0Yc+w4hO1CigZdMVbmhEfkxZfMjKj29i+1sjmzgAACAASURBVF3nDLlINMem8S418QHgbmAh8KSI/FJV3yMii3HTsU9V1UBE1gBP46Zpf0VVXxh3y01R5W9Ltvf7RGWziQb6gMEZQxoFQwe9qybTrr2yWbQ+uJZwf1eybha4WTzu1qAk49EOMxA6ubWYv/isOeUyWh9cy8IP3kB0YB9ldcfHz0e0brryYO9yWEe69Yk5NliGzUz53qDjL7qb5245A3ATc8L9e8nt2TEiv/LP5/OrZdMVhD0dw/Kr3A2UjwI327KsPB5EL6Peo3bZNrL3TFJl1J9zO0Fnk+VXCRvvbMdvAt8c5fFm4NSC758CnhrPuczUG+t+kSvO+SwAOzdfR+e373J7JB7owutpIxeEoBG5zE5ACXs6iDwPEcHzPMpSPrnevSypnUdTJkP7ozdRVrNkTO0TBouzIRefXoogsxNvdtWI13hls2nbeiOeP7hno4YhGgW0bbkGKXhcNKKy1l2t2nTs0mQZVrrGWnCUzVvI4tV3AtC0+RoyT9xB1NdDsLcZP5VyC7Eq5DI74/yKMyIKSMVL3uR6O1F6aH/s06Srl4ypZ933U/Esy9EuMg9x0RkFQ9bo0jB0JVwYjFi7y/Jr+rIV7s1BHe72ZO3cclr2tLLt7tVDHvc8wUv5Q24TAvx6exuz5tcMWbQVBhduzfekLYuLuUPxPCF3oItsfnHUPFWiKKL9m59F0rOGXJl66Qpq33Mp7VtvZP2//Piw5zDGHLsOV3Dki7OgJzOkaPHEQ4QhG2jn7fF93vzJrSPeK1/k9fYHLD7n1sO2zRPPLQGRrC0Wi5eYaPrShQAj8msgs4tU2SzLrxJgxZc5aocqzk669J4RvWYtmW7m1C4+6Gtq55bTlNkzZBam2xx35FVgQ/Vc9nTuI/PI9SOeq5xVxgf+6i/5zs9e5viL7h7x/EBN7UHbYIyZGQ5VnN265mx6n/4824Y9LhqNejy4Ym5v5g9DZmBqGIzahzU/Xg6j6eufIRr2nn4qzdLjTkhW8h/O8qs0WPFlJsVohdnx567jxItH2Zao4DUnXXoPme8N9ox5fXtp2XCR602rHrxdUDu3nG0PHfy9vvTEz/jXX3o2zsEYc8QOVpgdatbgNeu3uKLte18YfLBvH00bLsYTb0jRNJaV5W9dc7blVwmz4stMKxMxExPgz17bwKl/fQYnvf/8wx9sjDETYCLHVtk4rdJmxZeZMmMdwD8RXnf8YoJ//emEv68xZuayWYNmoljxZabMRPVqjVXqQ
AdRFOF5Y1uV2hhjDsV6o8xEsf+VTMn6mz97JS/+13eL3QxjjDFmCCu+TMn64+Pr2Lvt+WI3wxhjjBnCii9TshYtmIvf00IYBMVuijHGGJOw4suUtBPq5tC9N1PsZhhjjDEJK75MSTvzz05g20+eLnYzjDHGmIQVX6akLa6dT0/j74rdDGOMMSZhxZcpaRXlaerLsxzo6S52U4wxxhjAii8zA7zpuFratv+22M0wxhhjgHEWXyLyIRF5QUQiEXnLIY7bISK/FpFfishz4zmnMUfq9Le/iqZf/6jYzTDTkGWYMaYYxrvC/W+ADwJfHsOxf6GqNu3MTLk5s8qI9u4udjPM9GQZZoyZcuPq+VLVF1XVRjObaU1EeOOyStqbdha7KWaasQwzxhTDVI35UuC7IvIzEblkis5pTOLPXruU5t/YrUdz1CzDjDET5rDFl4h8T0R+M8qv04/gPO9Q1TcBpwAfE5F3HuJ8l4jIcyLy3IZv/fAITmHMwb31NUvp+L0N1ZmJpjLDCvPr2cdtE2ZjzOgOO+ZLVd813pOoanP8e7uIfBM4CXj2IMduADYA8KO7dbznNiavdraQyw6QLisvdlPMFJrKDCvMr/ue3Wb5ZYwZ1aTfdhSROSIyN/818G7cIFdjptTJr13I7pdsyQlzZCzDjDETbbxLTXxARBqBtwNPisjT8eOLReSp+LA64D9F5FfAT4AnVfU74zmvMUfjba9ZRuuvvl/sZphpxDLMGFMM41pqQlW/CXxzlMebgVPjr7cBfzye8xgzERpq5xN2/KTYzTDTiGWYMaYYbIV7M6OsrK2gK9NW7GYYY4yZwaz4MjPKKX+ylN2/+1Wxm2GMMWYGs+LLzChvetVSOn4z6kRbY4wxZkpY8WVmlPKyNJVRd7GbYYwxZgaz4svMOG955SJ2v/iLYjfDGGPMDGXFl5lxTn/bK9n53HeL3QxjjDEzlBVfZsZZtGAu2ptB1RYgN8YYM/Ws+DIz0soFPgN9B4rdDGOMMTOQFV9mRvrL1y/j9z95ptjNMMYYMwNZ8WVmpLe/Zgl7f/fTYjfDGGPMDGTFl5mRysvSzJF+cgMDxW6KMcaYGcaKLzNjvX7pPLo6bKshY4wxU8uKLzNjffDtJ/C7fx+xp7Ixxhgzqaz4MjNWQ+08vH2NxW6GMcaYGWZcxZeI/JOI/I+IPC8i3xSRqoMc914R+Z2IvCwinxzPOY2ZKCJCw1yf7r2ZYjfFFIllmDGmGMbb8/UM8DpVfQPwe+Ca4QeIiA/cA5wCvBY4W0ReO87zGjMhPvC243n5x7ba/QxmGWaMmXLjKr5U9buqGsTf/hhYOsphJwEvq+o2Vc0CDwOnj+e8xkyUP331EjLbf1PsZpgisQwzxhTDRI75uhD49iiPLwF2F3zfGD9mzLRQNysqdhPM9GAZZoyZEoctvkTkeyLym1F+nV5wzHVAAGwe7S1Geeygm+qJyCUi8pyIPLfhWz8cy2cwZlxeU1dB4x9+V+xmmEkylRlWmF/PPr5lYj6AMabkpA53gKq+61DPi8gFwCrgZB19p+JGYFnB90uB5kOcbwOwAYAf3W07H5tJ93/e8Wo++eQTLH3Fq4vdFDMJpjLDCvPrvme3WX4ZY0Y13tmO7wWuBk5T1YPtUvxT4AQROU5EyoCzgMfHc15jJlJ9zTwGOluK3QxTBJZhxphiGO+Yr/XAXOAZEfmliHwJQEQWi8hTAPFg1jXA08CLwCOq+sI4z2vMhKqbIwRBrtjNMFPPMswYM+UOe9vxUFT1lQd5vBk4teD7p4CnxnMuYybTKW+o48mf/IDX/39/VeymmClkGWaMKQZb4d4Y4C/edAL7XvzPYjfDGGPMDDCunq9JV3disVtgZogy4I0n9vFHDXOL3RRTIuzfkjEzS938ijEfK6NP7jn2icgl8cyjklXqn7HUPx/YZzSjmwl/ZvYZS0Opf8bJ+nylfNvxkmI3YAqU+mcs9c8H9hnN6GbCn5l9xtJQ6p9xUj5fKRdfxhhjjDHTjhVfxhhjjDFTqJSLr5K9B12g1D9jqX8+sM9oRjcT/szsM5aGUv+Mk/L5SnbAvTHGGGPMdFTKPV/GGGOMMdNOSRdfIvJPIvI/IvK8iHxTRKqK3aaJJCIfEpEXRCQSkbcUuz0TSUTeKyK/E5GXReSTxW7PRBORr4hIu4j8pthtmQwiskxEvi8iL8b/Ri8rdpuONaWeX1C6GWb5deyb7Awr6eILeAZ4naq+Afg9cE2R2zPRfgN8EHi22A2ZSCLiA/cApwCvBc4WkdcWt1UT7gHgvcVuxCQKgLWq+hrgbcDHSvDvcLKVen5BCWaY5VfJmNQMK+niS1W/G2+KC/BjYGkx2zPRVPVFVf1dsdsxCU4CXlbVbaqaBR4GTi9ymyaUqj4LdBa7HZNFVVtU9efx1z24DamXFLdVx5ZSzy8o2Qyz/CoBk51hJV18DXMh8O1iN8KMyRJgd8H3jdh/3McsEVkJ/Anw38VtyTHN8uvYYflVYiYjw6b33o5jICLfA+pHeeo6Vf1WfMx1uC7EzVPZtokwls9XgmSUx2xa7jFIRCqBrwOXq2p3sdsz3ZR6fsGMzDDLrxIyWRl2zBdfqvquQz0vIhcAq4CT9RhcV+Nwn69ENQLLCr5fCjQXqS3mKIlIGhdam1X1G8Vuz3RU6vkFMzLDLL9KxGRmWEnfdhSR9wJXA6ep6oFit8eM2U+BE0TkOBEpA84CHi9ym8wREBEB/h/woqquK3Z7jkWWX8csy68SMNkZVtLFF7AemAs8IyK/FJEvFbtBE0lEPiAijcDbgSdF5Olit2kixIOM1wBP4wY5PqKqLxS3VRNLRLYA/wW8WkQaReRvi92mCfYO4DzgL+OfvV+KyKnFbtQxpqTzC0ozwyy/SsakZpitcG+MMcYYM4VKvefLGGOMMWZaseLLGGOMMWYKWfFljDHGGDOFrPgyxhhjjJlCVnwZY4wxxkwhK76MMcYYY6aQFV/GGGOMMVPIii8zbiJyrYjcX+x2GGOMMccCK74MIrJDRNpEZE7BYxeJyA/G8npVvUVVL5qEdv1ARPpFpFdE9onIsyLy+ok+jzHGHI6I/JmI/CjOok4R+aGI/C8R2S8ic0c5/hciskZEVoqIisjPhz1fKyJZEdkxZR/CTBtWfJm8FHBZsRsxijWqWgnUAD8AHixuc4wxM42IzAOeAO4GqoElwKeBfbiNtP/PsONfB7wW2FLw8Jz48bwPA9snsdlmGrPiy+T9E/D3IlI12pMicpeI7BaRbhH5mYj8r4LnPiUiD8Vff0dE1gx77a9E5IPx138kIs/EV46/E5G/GUvj4v3SHsYFWv59TxKR/xKRLhFpEZH18Ua2iMg9InLHsHb8i4hcHn+9WES+LiJ7RGS7iHxi2Ps+F3/WNhGxjaGNmdleBaCqW1Q1VNU+Vf2uqj4PbATOH3b8+cCTqtpR8NiDwAXDjtk0mY0205cVXybvOVzP0t8f5PmfAm/EXfV9DXhURCpGOe5rwNn5b0TktcAK3Ka5c4Bn4mMWxcd9UUROPFzj4qLqHODHBQ+HwBVALW5j
3pOBj8bPbQTOFhEvfn1t/PyW+LF/AX6Fu4I9GbhcRN4Tv/Yu4C5VnQe8AnjkcO0zxpS03wOhiGwUkVNEZEHBcw8C/0tElgPE+fJhRhZWDwFniYgvIq/BbZr+31PQdjMNWfFlCt0IfFxEFg5/QlUfUtUOVQ1U9Q6gHHj1KO/xTeCNIrIi/v4c4BuqOgCsAnao6lfj9/k58HXgjEO06Qsi0gX0AmtwXf35Nv1MVX8cv9cO4MvA/46f+wnulsDJ8eFnAT9Q1TbgT4GFqvoZVc2q6jbgvvgYgBzwShGpVdVeVS0s+IwxM4yqdgN/BiguK/aIyOMiUqequ4F/B86NDz8ZqACeHPY2jcDvgHfhesCs12sGs+LLJFT1N7hxDZ8c/pyIrBWRF+PBpl3AfFyP0/D36MGFTr6QOQvYHH+9AnhrfJuwK36fc4D6QzTrE6pahQuzVcBjIvKGuE2vEpEnRKRVRLqBW4a1aSODgXgug+PFVgCLh7XjWqAufv5vcbcZ/kdEfioiqw7RPmPMDKCqL6rqalVdCrwOWAzcGT9deOvxPOBrqpob5W02Aatxvf4PTW6LzXRmxZcZ7ibgYtztOADi8V1XA38DLIiLoX2AHOQ9tuBu+b0dmAV8P358N/DvqlpV8KtSVS89XKNUNVLV/wBeBt4dP3wv8D/ACfEtwmuHtekh4HQR+WPgNcA/F7Rj+7B2zFXVU+NzvaSqZ+Nujd6GK/jmYIwxgKr+D/AArggD+AawRET+AvggB+/V+jrwPmCbqu6c7Haa6cuKLzOEqr4MbAU+UfDwXCAA9gApEbkRmHeIt3kK17v0GWCrqkbx408ArxKR80QkHf/603j8w2HFxdxrgRcK2tUN9IrIHwFDijhVbcSNVXsQ+Lqq9sVP/QToFpGrRWRWPAbjdSLyp/F5zhWRhXG7u+LXhGNpozGm9MQThdaKyNL4+2W43qsfA6jqfuAx4KvATlV9brT3iY/7S2DCl+YxxxYrvsxoPgMU9vQ8DXwbN+h0J9CP6z0aVTy+6xu4sQ1fK3i8B9drdRbQDLTiepbKD9GW9fE6X724Iup6Vf12/Nzf4wa29uDGYWwd5fUbgddTsESFqobA+3ETCLYDGeB+3K1UgPcCL8TnvAs4S1X7D9FGY0xp6wHeCvy3iOzHFV2/AdYWHLMRd9F5yLFcqvqcqv5hshpqjg2iqsVugzGTRkTeibv9uLKgB84YY4wpGuv5MiVLRNK4hWPvt8LLGGPMdGHFlylJ8TiyLqCBwRlJxhhjTNHZbUdjjDHGmClkPV/GGGOMMVPIii9jjDHGmCmUKnYDDmXLT3bZPVFjZpCzT1p+sIV7jzmWX8bMLK+qq+TNK6rHlGHTuvjq7Q+K3QRjjDkqll/GzCwDubFPqh/3bUcRWSYi34/3/XtBRC4b5RgRkS+IyMsi8ryIvGm85zXGmIlgGWaMmWoT0fMVAGtV9eciMhf4mYg8o6q/LTjmFOCE+NdbcXvyvXUCzm2MMeNlGWaMmVLj7vlS1RZV/Xn8dQ/wIgWbMsdOBzap82OgSkQaxntuY4wZL8swY8xUm9AxXyKyEvgT4L+HPbWEoXsBNsaPtYzyHpcAlwCcu/Zm3nna2UOfR5mfjqjwQWT6jc1VVfpD2JfzUKZf+4wxBzfeDLP8MsaMxYQVXyJSCXwduFxVu4c/PcpLRp0JpKobgA0A9z27bcQx89MRVXMqiCQF0zC8UKVCA9jfT1fOL3ZrjDFjNBEZZvlljBmLCVnnK95D7+vAZlX9xiiHNALLCr5fCjQfzbkqfKZvcAGIEEmKCsstY44ZU5Vhll/GGJiY2Y4C/D/gRVVdd5DDHgfOj2cMvQ3Yp6ojbjmO8XzTN7jyRKblLQVjzEhTmWGWX8YYmJjbju8AzgN+LSK/jB+7FlgOoKpfAp4CTgVeBg4AH5mA8xbNc//5b9x7241EYch7P/hhzrzo48VukjHm6FmGWYYZM6XGXXyp6n8y+niIwmMU+Nh4zzUdhGHIPZ+9lls2bKW2voFPnHUKb/uLd7PiFa8udtOMMUfBMswyzJipZns7HqHf/foXNCxfScOyFaTTZfzvU07nv77/dLGbZYwxY2IZZkzxTevthcbrsvM/wL7u4ZOWYP68edy16ZtH9Z4d7a0srB9cAqi2roHfPf+Lo26jMcaMZjLyCyzDjJkOSrr42tfdzQmXrB/x+Esb1hz1e7q7D0PZ4FRjzESbjPwCyzBjpgO77XiEausa2NPalHyfaWuhelFdEVtkjDFjZxlmTPFZ8XWEXv26N9K8czutjbvI5bL8+7e/xdv+/D3FbpYxxoyJZZgxxVfStx0ng59K8dFrb+G6vzubKAx59wfOYuUrbZaQMebYYBlmTPFZ8XUUTnrnyZz0zpOL3QxjjDkqlmHGFFdJF1/z580bdXDq/HnzitAaY4wZO8svY0pXSRdf45mObYwxxWT5ZUzpsgH3xhhjjDFTyIovY4wxxpgpZMWXMcYYY8wUsuLLGGOMMWYKWfF1FNbdcAVn/u/X8X8/8OfFbooxxhwRyy9jim9Cii8R+YqItIvIbw7y/J+LyD4R+WX868aJOG+x/NXpf8PN936t2M0wxkwAyy9jzFSbqJ6vB4D3HuaY/1DVN8a/PjNB5y2K17/l7cydv6DYzTDGTIwHsPwyxkyhCSm+VPVZoHMi3msy7NvbwWc/cS7dXdO2icaYIrH8MsZMtakc8/V2EfmViHxbRE6cwvPyb/+8maj5V/zrNx+aytMaY0qH5ZcxZsJMVfH1c2CFqv4xcDfwzwc7UEQuEZHnROS5Zx/fMu4T79vbwS+eeYw7P7iUXzzzmF09GmOOlOWXMWZCTUnxpardqtobf/0UkBaR2oMcu0FV36Kqb3nnaWeP+9z/9s+bef8r4YS6Wbz/ldjVozHmiFh+GWMm2pQUXyJSLyISf31SfN6OyT5v/qrxw2+eD8CH3zx/Qq4eb73qUq44dxWNO/7AuSe/ie98w2YOGVOqLL+MMRNtQjbWFpEtwJ8DtSLSCNwEpAFU9UvAGcClIhIAfcBZqqoTce5DyV811lSmAfd7/urxAx/5xFG/7zW33ztRTTTGFJnllzFmqk1I8aWqh+xfV9X1wPqJONeR+PVP/oP/aOlny/ONQx6v2vMf4wovY0zpsPwyxky1CSm+pqsb73202E0wxpijYvllTOmy7YWMMcYYY6bQMVd8qSpM/nCL8VFlCoaEGGOOMZZfxhg4Bouv/hA8DaZvgKniaUB/WOyGGGOmG8svYwwcg2O+9uU82N9PhQ/x7O9pRVXpD+N2GmNMAcsvYwwcg8WXInTlfMgVuyXGGHNkLL+MMXAM3nY0xhhjjDmWWfFljDHGGDOFrPgyxhhjjJlCVnwZY4wxxkyhY27AvSldt645m97eHgD2dWSINAJANKJqYT0AlZVzuWb9ljG9R6HDvc4YY8Yrnz+F+QWDGTaWHLIMmxms+DJFceuas2nauX1IQEVRSHp+PfWnXUlqoB/x3IbCbVuvp7c/AKBrz0uHfN/e3h6Ov+juEY9vu//jE9h6Y8xMdrAiK8plWXze7dT
kcoifTh7PZ9jh8gssw2YKK77METmaq7LC13TtaUXFI4pC/NlVRPu7EM8D8fBnVxF0t9P89VuQVDkN598BgF+5gMWr7wRg5/rzD9mWvZl2Xrh/LSdedMeEfF5jTGk50gw7WH6l5ixAy+e4DPNT+HOqiLoztP/LHWgY4M2aR/3ZtwCDGXa4/Mqfw5Q+K77METnUVdmhwuRN17hNgp9ffymLV99JX/su/PmLaPva1TRccCfZzC5S8+sJ9rWSrqqnZdOVRLksABoG9LXvAiDKDnDd6lWAK7TSldUA+BWzOfGiO2ja8RIdT6zjhfvXEvYfSNqQ6+3kutWrrOvemBnuSDNsb6aditqlnHjRHUPyK1VVB0Dr5quoed+VlNUuJ9u+nbJFxxHlBmjdfNWIDNMwTM5RmF8wmGE/+9yZI/ILXIbduuZsy68SMSHFl4h8BVgFtKvq60Z5XoC7gFOBA8BqVf35RJzbFN8L96+lP9PO3kw79WfdnDzu+z71y47nZ58784jeb3BfucEtWBRFxAM/Rfm7PkEUBCwE0tVLAGjZdCW7Xn4RVAmCHEHvPhrOXweAAGF3G0tWnsDPb/1QUrwVsqJs5rL8mtnyhU6ut3NIhuXzK39BN1YaZAu/G/xSBAV27/gDS865ldTAQJJfMJhhURgyMCy/UmXlDGR20bjlWsuvEjFRPV8PAOuBTQd5/hTghPjXW4F749/NMSB/pda1p5VQla47LwIg6utxQeN5uIiAli3XoVGEhgHip9gdv8dzt54JGoI39n9yXrocVUX8FGW1y2l5cK07bxQXaCLJ7+L5pBYsJtjbjPjuHJIqA4aGoYrH7PdcQRgO3bxu98PX21XlzPUAll8l7WAZFvZ0gOcPObZ58zUj8gvguVvOGFN+5a8dRQRJlbnf/RSqLhfV88kO9IMWXGiKgOdTtnCF+9ZPWX6VuAkpvlT1WRFZeYhDTgc2qfuX9mMRqRKRBlVtmYjzm8lz65qz2b3jD6Qrq4nw8GfPdcFQNpva066i48l11LzvSgBS8+uQVBm5jt1knriDhgvuJNexm9T8Orx0OS0PXEa4fy+57AAaXxGqKhrlB6wqhAFBZxPh/i5aHrgcgGB/Jy0PXE7Qk0E8z73WT0EUIn4aDXMuvBBQJezdC0Au46JTo4Cy8vLkM4VhSHnt8iGfM11ZPeot09H+PGwmUmmx/Cpth8owgIbVXyDX2Qi4DAv2tSX5JUA2zrC2zVcNya+kcIp/j3JZ1/Pe2QgihL2dtG68AnAZlnn89vjCUF0RF4VAXJiFbr+pfD9Z2Lt3SH5FqRS+74pEy6/SMFVjvpbAkIuIxvgxC69prmnndhae8SnES6FRQGrBYsRP07rpilGPb3v4OqK+bqK+blo3XoFGAXg+XvnswQ54P4WIR7C3GaLQBU8YuO8h6cnKR5EANauupOOpuwh79iTnUo3I7dkOCEF3huYN7mrWFXMa12LuPcIwpGnHS0RRSBgc2cZ6hYE12m0JsJlIJc7y6xiVL7xGZFgqTeumK0cc3/7oTUT9vUl+gSt+vIrK0fML0CiM86vJPS+S9JC54RKCALWr1tLxnfWDGaYRQedu8ncNwp4Omr642r0qCobkF7gMs/wqHVNVfMkoj+kojyEilwCXAJy79mbeedrZk9kucxiRRqSrlyCpMrKZXe5WX5hLAqcwHAB04AD1591BsK+NdM0yNMgifpqWTVe4QivIxgEDiIfm+mnecLGb7TinCoCwtxN/7kLqP3wr4Aa0ltUuR4MB/DnV7mtVch3u/8N09VL8uTXUn3M7AEFnE22P3IiGrvDD81E8KmqXIuIDQpAdQBkMtyDI0dG2h0tPeTPi+3jiMb+mlsrKuUMG6D6//tLkqnMgs2sS/+TNNGL5dYzq7e0hXVk9MsOCnMuHYaJs35D8Anfbr3XzVaPmFxqhUUj7Y5/Gr1wAuPzyZs0jVb2URWfc5Hr9N17ucigYwK+sJl2zbDC/apaBapJho+VXFIF4XpJfwJAMy+fX3536ZkRJMmzJiuMsv6apqSq+GoFlBd8vBZpHO1BVNwAbAO57dtuoAWcO72iXhGjc/hIqgxsfRGFIrrMpWbPGS5UR5QZcKPgpZMj/S5LcThxOcP9bparqWfaRu5LHd93/MWpXrWV2vbsCa9p8DSGdaDBAS3xlGvV1k42DSspnuyBTCPd3ujbNWYBXPnvwFqSfQvwUmSfuQDzXzR/u76JsbnV8exJy+9qGFI5RXzdEAaTK8eYsIOhqo6OthY62FkDpvNVNGhg6mNbMEJZfU2w8S9rkl4MAt3YgQK6r1U3YATw/TRRkk56wfCaMxWj5VffXVwNQXrs8yS9wmdKaH6fa1xPfZgQpm03rxiuS/PLnVKOoy7B4nNfw/HK3MfcOaWthhkV93W5IRhTFF5tCkO1j5x9+D0DHZ89APB/VkKbN17DknFvH/JnN5Jiq4utxYI2IPIwbqLrPxktMrqNZqK+3t4dINbmCA9DezviKLevGWQ1XkFsyohgbNDiWoZPdX70MAC8ec1Eoyh6g7qxbBs8XhWSeuAPirva6sz6bHNvy1Y8jXoq6sFLTcAAAIABJREFUc25z5wgHu+O9ikpqV60ltWAx7Y/eBPu73DG5ftq23pC0PR/IquDPraX2NNfL1vyVNSy+cL27tdnRSFl8tbj77nMP9sdnSpfl1xQ72vw6/qK7ee6WM5IM81QJ97sxoPlMSSriUaLqYPmVV5hfQPLeeaPlF+AyJy6U8hnWuvEKFKX+gs8nF3X534fnl+b63fOF+YXrDQP31nVn3TxkDG7HE+uov8DNmMxnWLZ9Ox3fHiweTfFM1FITW4A/B2pFpBG4CUgDqOqXgKdw07Rfxk3V/shEnNdMAvGRstlo9sDI56KAbNs2wI1PaNtyLRoGtG25Fldexd3hnU0oSq59e/LSsKcDUNK1K5KFB1s2Xj5qE1LzF5HrcANgJT+7SFzfWa6zcXA6ERT0tCnilw0pwADaHr6WoKsN8TyyPR3gucUQJV1Ozbs/Riqe6u2uUIcGr2qUVI359XoA+jONVNQuHdFuG8x6bLL8Ki06ZCZg4c+0QhSQ27PDDYzvzqBhQOP68xi8PJQx5xdA4z3nM1y6ZmmSk3j+YEEnQtTXk9xuzI+HzedZsiJ+Qe9WsK+NbPsOvHRZ/H6pZHiGpMqoO/tzgNL64NrkQnLoH8bgl/kJARoG9GcaR5SZll9Ta6JmOx5yYEM8S+hjE3Euc/Ty63ENXydm+A+XZg/QcIFbUb51y7V0PLHOzbjpcz+Y3qy5pGuWUv3ujyY9Qs0PXAZRSNTXTXrhysEAUVcwuYKngqivm8Z7LsANKnWzHNu2Xo8/ZwFeusJNxVY3m7GsZimIhzdrnrv6iyIIcknBNCTYENftXpC7qQVLIAypO/MfSdeuQETIduwmXbPMDaaVwVAE4kH+cdEFBJ3N7usoJNg3uOp0y4N/T/mCenK9nbSLR6QRopENZj1GWX4dG8aaX3h+kl+qStvD19Lx5DqC7gzi+fhzqtAoJFVVR8N5bieM5gcuo+H8z9N4z/kj8ivfu5
SqaiDq66b5K2viLHQZ1rz5k6Qqq9G4ly3fe5WuWerSKS6K2h/7tDvv/LqkneK633GHDS2e8hMDxE+x9GOb0Cgi19mYjEVr+WqcK/HrU9VLhhRb4YG9ySzOwgwL93fRuuVaRLD8KiJb4X4GCfsPUH/WzSxZ+f+zd94BUpXX+/+8t0zZ3tjC0ouKiD0aTeyxY0cRGxY0QbHBLybYW2z5gqCo0aAiSBOMEQFRsUSjUbGCYEGKW2DLbC9Tbnl/f9yZuzO7S1lcVGSef2Dv3qk79zPnPe85zxmccHxrF1fh+X/DqC1HmhEqF9wOQN6p452UeicFq9K22TzzRkQcvKyWOtT0HkgjRNElkzHqK9BzeyGjW4kVsyZQcOZfqJh3K0CbV1cUWoWj7mPzczdgNQWoXHA73oweANjNNUjLpPzJKxK9DBUVNT0PJy8WnyXrKKF52uq43Fgslsq33aaBmNTULKyWesKN1WBZoHtcYFW9/FCymDWppHaSdoRfAsg94RqktJ1MvbTIPSW6bdgeCAKwzQ78cpqMTJdfQALDKmZNIP+MmwDYNGcidmOlc3dxmSgtI4/el01l/cMjCcz9KxDjlxXNvMVLomUWOv+LMrJ9Y1Ps+Sl6tAY3diwh0SejgZp078f9lW05TVNJfv1sSgZfu6EqStcnmPTVRVeTDTUBHOe/xItdShstpxg1xUl3xzJPZm159MIXjueW7kMoCnmn3OBuF0rbpGbJZPKH30DN4snoHi8mwgGTEOgeL5qmU9xvMOG8fABUTUPJKOjwvD0ZPTCaa9l33BPusfKNa/Hm9WHTjBvYd9wTrlt1uLGSwNy/YrfURwHadj8ivsBWSqe9PNiItG0Ci5yOSTvYCJbpWGeEW9zbxApb9ZxeCCMI4AKrPSCTSiqp7tfW+FVR6mz3teeXntvb3a7Tsjvnl1FThtC8Hfilahqb59yMpunoHscvMJ5hMX4BVCgqxf0GO9M2OpFQtQR+Aa6Dfuz46ukTCFaXUTFrApqmx15E3OKwjV+xLL3VUkfZY6ORtuUyTNo2Fc//2elKj1pexAJOLasQNbpNkOTXz6Nk8PUr1P3jRtFQE+gw1sc2Iqiq2sGkT0/LYcCYR/ns/nMdd3rb7LSrT6iak4bvUJvgXLRFo+5j8+ybEIqGP79tBaV2Vqi/BaWlpVM671YUf0YCDBTdh9FUjdJJXUMoUEakqZaV08YmHM8/489Uvfx3xzdMKE4dmW1hG2Gn3kJKjMAP2KEm8kfcmfCaapZNo/KF2xGKQv7Ie4iRTwgFKW1qlkxG7UKXVFJJJbV9+jH8Kp89EeikKzlWV6WoW+SXJ6sQNS27A7+K+w2mQumknqoTKUJh/fRrqa2qQEnJbDuu+wgHSrZY0B9pruvAr7zhE/B4vZTOvAmhJPIrJqP6h+hrE+SPuCPhNdUsewyzvoKCUX8jYfVpmQSWTIZYYJfUz6Jk8PUrVHNzEwf8pWOB5KcPjHRnlXWmrB6F1Gwuww42YjZUtv3CtojP0SfWJgi3GNRqrHTS87bppq1VVUX1pbhbiuFACYGlUxxbB3BbqFdOG4vZFOBvMxZzy6XDu9TpJG0bNS2bnpdOcY+VTL8mYXVsGxGktBGqjtlQiR1sJLBkMkLVsFoaQDgrYkV3VrZC93Z4HIgW+NsWVnMtlrTxRLdAk0oqqe7Rj+EXQE3l5g78im/E6Sq/AIS0MZprCQdKqF4yFdsIuTWiVks9K6eNRfWlkJmbt1WGbTGIkzKBX6XPXo+eU4zVWOkk5trxC5wsfM2rU53JH+DsJOT1RSgKFXNvRpqxIC0h7R8t/G8kgiChSDapn1TJ4Gs3VShQ5ha8x7JGRnNttMDcpmr+re650rZRUzNR/ZlIabtdimZTwLGgiVpRhJc/AsEGal68m8zcPPf2fqBHv4E0Nzehqo7RYNFlj7i/N2rK8GQVUv7UlYCT/eos0EpLSwcSg7CGmgCmZaBnFibUKSgeHxXzbkVIm6r5tyGjRmOxrQc1I58ep96IL68XG6ZeiFASV4Ey0krByHsILJ2CntO7zR+sphQUFTUtB6u5jkhjtdt6bgeb2Pio0/0kzQjhol4JzzuppJLqPsUYFs8vcCxnOvIrC6F5EZreZX6tB3r1H0xzcxPF/QZTZYYpvHgSIpo5MmrK0LMKqZg1AX+qD9gywzRFdDjeUBNwHj+OX0LRqJg1wS1/KH/y8gR+gcOw3qMn8cO0jh2XMuKYXVfMmoCeE+3MFqKNX6nZ5A3/f1TMnZjk18+kZPC1C6szU1RwYJNSut7tVgGnTsK2TFbcN6JDoam0TCwpsS0ToeoUjLwnYdhsYNHfyTv1RlKKBmJEwq4R4ObnbkAVgn3HPcH66dfytxmLt/l8S2dPxLYtJ13e9gyI1Ja7W4pdbWvubJVZfPW0Ds9p3Gm/JT9qiOjIdoEno5m4tnltsVMSV82xeZLO7yUIldxTxyMQSNsgtsqsivPiSSqppDqqK/wCEvjlGorSxi/hS8dqqUOoOvkj73VvF1j0EIUXT8asK3ed7mHH+LV++rWYLXVto9CcZ4BZX4nZUkdawR7AjjEsvpGg+JppAAnPqyO/nJ2EtlKMtnm5jvOPiL4/nfArzhA7ya+fR8ngaxdWc3MTWnpeQroaOm65gTMXTCga/a6dSbCqJHpBShAKlfNvw2quJXbh1bw2jbxTbkTL7gmA0D1UvnA73swemHFdM0L3EWmsdrcMt6WJ0+Zyy6XDaQ6ZrqN9TOFAScJqc2coMzevQ6cUQIWqomrOpeAWswoSAlChRQMuRUXN6OHUxdm2s/VgWyiahmU7EwBiLtPNISegqwus4/5xo5JeOUklFaeu8AsAIeh/3WyCVbERQREQUDn/9ii/wGXYq4+448YUfzoVs8ZjtdQnZI52hF8AVw8/pAO/ABRF3anX+Jb4FXa5KaLG0c6/SjTIjEmoegK/3NKPJL9+FiWDr91Qrh+WUKKDsjXUlKy2eqbGaqpffgDFl0b+uXeRe8I1VL5wG2k+jZqqarT06MVum+g5xfS88P5OU9+7ioS0nS1Kn5Ned1yrox2cmtcdEQJOnYXiz0DoPryZTr2XqmkU9xscHdxto+f1Rk3Lxta8yEgQ4Uvnh/XfcfXwQwBn66Goz4CkoWFSSe2AhKJgxznIx/gFcTWZjVVUzruZ/HPvIn/EnZi15VS9cDtWa/2vjl/gcKPkhdvdIn+rpZ5I1YYO/IoFoEL3kVI40LHl2QK/8i94kIoXbu/Ar9jMSCDJsB+hZPD1K5SiO/VOMesGcNqxY8WVvrxetG5e1+YUL213RA84rspaZgGVc/6Conupef2xNiNAhDuQVtF95J04tl3q+6fXtmrEtnVer/7OajIGkrpQE8KXjp5TTMGo+4gV5UYCJdQsmUzR6CkYgVLqlk7ucF8SGXXGl9iRIEWXPIy0DMy6Te5q+Ydpl+zQ+JSkktod1Bm/AMdXjyi/KtY5jUBStuOXU9xpNlRSs2Qyiu6lYu5E7GATQlXBtn9x/ILtY9jWzpk4bW6CQ31Na73LLyGE41Sh6ZQ/Ppqel0/DCJR2+jwS+
BVlWOHFkxL4FQ6U0LzcqdlNMmzHlQy+djHFX2B1gSqEL92pwwK0qAdNj1OvJ7z8EbdW4P5xo5zgS6hOyj5O7sBs3YuUslOLCWmEKTjvHvoMGoI36qsFsGnGDW4aPNyFLUPVl8KmGYmjhYzmWnr3G7jd9xGv7V1lxc5rP0Yj9v8YxG65dDhBPFTP+Ut0m9UBuhWdD7l5xvWY0XEjQlGxTKPN10dK1//HagxQMWu8U4MRXaFD4qiipJLanbSj/GpubgI1jl8S0DR3AblVfoWdAEI2VQH8aH4JaXfgV+z4jmp7GBZ/Tvz72Nzc5Lr+xxh2zakHu/wSqhYdueS8PzF+CUVBmpE4fjm1YjF+VS28E7uljorZNyXwSygaGd6kzc6PVTL42sUUnzFZOW2sU2ga79K+hdsUnn8vVa9McmolpO1ksqI+V9K2nVlkqkZsu82s24TVXOs4y7fU07k/fNeVlpYOzU3gS/zopeUN/MlS1duTdRo6xhk7UvL91+hRU9nNM8e7XU7lj19Kr2tmUvrYJVQtuNMt/nVlW6CoFFz4EELRkJbh2liUTksO505q99SO8mvAmEepnzIGPa+343clBMTc322bSNUGJxDTdGd8j2XG8asWs64cj9fXeS1ZFxXrfGyvtLyO9Vg7S9tiWFaPQgaMeTSBX1JKKmffRMGFD7HpqSvpdc1MKubeTNUCZ+xRgmwTVI2C85zRbPH82vTMOPD6d+4L3A2UDL52Yam+FCJVPzjOxtJCiRaIx+/Ju+eqKkLRqJx3M2pqtnOxxc9GVDX03N5uKzJxF6NTF9A9hny7Yi2AcAtXhdPlpKjOnLVAKao/g6JLJlPx/J8pGv2w468TCSFtE6ulHrO+wr2tO9MtaSSdVFJd4hc49jHOXEWn7jK+IUbv0c9hl5SuO317bc0jrCva1Rjm8suMIKML6xi/co8fS/WihxCqSuHFk6mc5/ALYlYcMjofMo5fSXWLksHXLqTY9mG8E7InoweqLwU/kU5bpWO30cIhck68hsDiSRRc+JBb1wU4aWWirtBxoygUfwb5p02g6uWH3PtTVdW1ZzCaa92V1q/RC8YZVSLjtgmdqEnPKkQoCrVvPIEdbKRi9k1YLXVsfm48VkstBeffh5rRw/HYiQ7BNWpKXTNHgPrqig6O1sB2dV0lldSuqvaf+5hJsdkU4LHFH3d6m4aaACXfryHnBGe2eWDxJPJOvynKL0HF7D+31a9aJkb1xqiNgnD5FVs8dje/Kss2YEYibFzxOna4BTMSIdVqJC3F1+HcmsZWZFoPpwYLhX6/PRXN46Gw9wAUVe3k3n+cOvKrTS6/Iq3YwQbn/Jlt/NKye7J5xnVoUWuO9vxqqAlgS7sDw5zvoqS2R8ngaxdSbPswfrQGOLUL8dt4CYWXm8uiZoDt9+jjRvd4/FTNvzVqHFqL4s/Aaq1HFYLw8kewg42u03O89044L3+b3ji7omKFrXWBqijTo/45tk3VgjucVTcCO9JK/sh78eT1IRIowZPXh80zbnDnrUHcmBPbIuKaKEpsG7JOus49L2Z1UTX/9p3/ApNK6meSFEoHawkgodswnl/11RVYthNItbsn91/F43cDB8DN7MeMU7uLX5ZpUrb+G8o+eQOjbhNZKRp79tDJy0jhiuN70Tu/o/3EltTQHGTJx68QipisWN5IfUTBl9uLgYefSnZ+ER5vx+CtK0pLS6d09sQEfoHDsMr5tyEUFTvSStHoKS6X9NzeVDx3o+OkH3Pij1l6xPHLaqkH2+owtkjVNCrm3UqPHazd3d3ULcGXEOIkYCqgAtOllA+0+/2lwN+B8uihaVLK6d3x2LuDYjCqC1TRwzTdolOhKPjyenU4P74eoPb+kahp2W2eXaoWTUNHuxdVnfxz76Ji1gRyT51AYNGD5A2fQPXCO+nVb2BbF81rD7O+3eP8GrNd7YvxFX86QtVQPCkUX3g/pc9eT96pE6he9CBmYwAs04FSFE6Of07clq2quwauNa9ORUYH+UqgauFdTtu3olF4+ngsy8KWtls8C8m27Z9KSYbtPMVfU7ZtdYlfK6eNBdNo45fmiRZ+J/ILiDrCN5E3/P8hbZOaF+/mbzMW/yh+GZEwm9Z+xboPXyXVqOWwgdn85Zy9SfUPQtnOeY+dKTPNzwXHDgPgcsCybCpqG3n1k9l8uqyOepnGkBMvoaB3/y49TkNNIIEf7fkFsPHRS1D86ZiNgbZauaj7fYxfsRpWoWguvwSCwKvRwFlRqVp4d5tvmlDJH+40IXTWAJBUR/3o4EsIoQKPAccDZcAKIcQiKeWadqfOl1KO+7GPtzsqBqOV08biySnGjqbYzbpyWivWE26sxmrRt2iEJzwplP/jcmeMhm2z+dloYbmionhTyR9xRxRoEqFq6DnF6Gk5LjB/bRfP1lq248FfvnEtSkYBQvO4I0kUTwqV8291VoOK6qblpRlxvhjcehOB0L1snnmjMww32AjgOG8LgZZRgNlQgSevD5ueGecOC44NCY4p2ba985Vk2M5V/DXVMPVKtKwCpJQuv8DJyNxy6fBOAyKh+1x+xc4NLHookV9uUCbR83ojzQh2NAO9I/wKtTbz1TsvE1z3IX/Yp4iJl+xNqr/zea/dIVVVKO6RxZiTD2AMUN/UynNvzmT5v2rJGfwb9jn2bHwpae75W2KYtM2t8ss5ycJqqgFFdYvpgQR+CUXpwK/YzkjBBfcn8AucIvwkv7qm7sh8HQJ8L6VcDyCEmAecAbQHV1LdIM3jdUf8CEXHn98Hb0YPd0RGZyocdR+bnhlH4cWTsBqrnW0xywQhqJx/G+X/uAKAqvm3ouV0XIn+2rQ1GMevGiFqMmFGkJYzbDfvxLFUzLuVPoOH8MP675z6jVjXlRlxak2iXyy5J45Dy+6J1VBFYMkkkBI9t1f0fCeAjtVjWHGTA5L6yZVk2E8kVdXQYwyL8gvAk5HHgDGPdsqwvJOvp3rRg9FOYw9mbXnU3b6NX/Hjhn6MajaXsvrVGfQQ9Vxy2CAOP+3EtqkXP6Gy0lO4/sxDGGfZfP1DJU/Mvp0mfzEHnXkVKVvJJm2LX+A0UPUZsIfDL0UBW7g2HTF+Rao2kHviNWjZxS6/el82lZJnrkPLyKc9v5Lquroj+CoG4h3byoBDOznvHCHEkcB3wI1Sys5d3pLaqYql743aMrTMAidtrKhOp95zN1Jw7p0dqit2Z8W8hzRNb/MEystn4rS5/OmUg5wgGJw6CaGgeFPdUUyR5rroqtLpFJJW2A3UYlJ0T3RFn3zXf0YlGbarSEq07J4J/LKDTRSPfRYETs2SGeny1WSEw3z076foGf6BR84/lOyMlJ3y9LsqVVXYZ0ARj40tYsOmGu6dMRHfkGPZ79iztiso3Ba/INoNuR38aq1Yn1BSASQU4SfVNXVH8NXZJ6B9M/0rwFwpZVgI8SfgOeDYTu9MiKuAqwAumnAvR54+qhue4q9DMXPSmHGe1VyHJz0H1bdlUEjbcvb1Y4q7YN0WZNvCrCnHaqmnes5f3MeC3XtVs7XOqPvHjUJaJptnXA8kfuClbZLm
06hvsrCFgpKSSeEF97P5+T87prbtilitljoq59+GN7PHVv+WSe00dRvDkvzauuIZZgeb8KTnuMc7U6S5zq05Ajrwy9kmc+wQBCKBYdtrevr1h8up+vBf/OXsAxg24Jgde2E/gfr3zGX69Sfx+qfrePrh6xgy/Cp67TFsi+dvs7NzS/yyOucX4NgTxXzZ2hXhb5pxQ5JfXVB3BF9lQO+4n3sB8SPfkVLWxP34T+BBtiAp5VPAUwD/fHd90hEpTjHjz/Koy3zp0+PIHT7ePVYXqOKWS4fTVFvdlr63LQKLJzmeU3VxfxYpMWpKsVrq0TIL8Of3wZOew77jnnBP2d3367fWGXXLpcNRNC9Fl051DkgbCUjToPxJZxtXCgU1JcMFVodveEVFz+2FmpqN2VyT8N4n9ZOq2xiW5NfWFc+wqpcfcvkVO1YXqEJThMseaYQJLJ6EHWzErC1vC75cftUibQtPViGax5vAsG3xq7W5ifdn3s8ZQ9MZNf7kH1VA/1NJCMGJBw/iyGF9mPLv+axY+yUHnTSqU6uKbXZ2qloHfgGURTtP2/Org6L8iinJr66pO4KvFcBgIUR/nE6g84EL4k8QQhRJKTdHfzwd+LobHne3UfviyrpAFXpaDkLzJthO6Gk5BPFg2nE2ErqHgjP/wua5t6D36IsQClLa0e0vB2RWQyU/TLsEIe2Ex9lVuhmb6muZ9/c/M+qm/yMtM/tH3df2zol0JCl7bLT7f3CKgYWiuA0SpmmgelLY/NwNmE01lD9xqXse0FarYptdeNykullJhu1EdXZN1QWqUNN7dLDNkbaFSVsgEeNX1csPgaq5Bd52tGtY8WdgtdRT/tSVAAkM29q1s3Hl/yh5Zx5PXPE7cjNTu+Nl7rAC9c388YHneWrixdv9XPxeDxNHHsZ/vljHk0/fxZGX346qaV3jl2V24BcA0aL99vwCsJoClD8+OoFfANIykvzqooSUP35xJoQ4BZiC06b9jJTyb0KIu4FPpJSLhBD34wDLBGqBsVLKb7Z1v8mVY+eKt57Q03Lc46ovBSvUSu7w8e7+/sppY+l56RQ2TL0ALb1tfllsnITVVEPfwUN+ER2N5eu/JdTavMXfe/1p9Bq4Z4dga+mzk/jhjafpd/wYTr5s/BZv35265dLhCV09sWzkxkcudI0jw43VCKGQP/JehHDqJoyaMvT8/lQ8dyOFo9sGc5dNu4S+A/f4Rfwdfk5deeSAn6X4bWcwLMmvLev+caMo3bgugV/gbDMWnX9PB36VPTcBs7EKNdVZXMX4peg+ZHMN0175cLsf+/tP3yF9wxvcev7h3fRquq74gOu5xe/zyuvv0CBTeOPRG7scDH5TUs0t8z7jgHOvp6D39ntsxTMsxq/y2RMxasvxpOck+bUDGtozg8MH5W0Xw7rF50tKuRRY2u7Y7XH/nwhM7I7H2t3U3nDQls7keSFBCog01YK08Of3ZeiYSXxy3wgq//0gldHb28FGSp9xzDyLLvq7W4AZDpRQ3G8w66df+7NcMNWbSqhY9xUVX33Ax++9ycnHHM7QnukMKtzySml9aTNr3g/z7opVVJet56Vpd3DC6PF8/eZ8Jh+tM/7NeRxx9mU/Ovu1oyqPmhpmn+IEgNIyCCyZjJZViFlf4abupRlB2qYzoy5Onc2LS+qnUZJhO0ftffNiDEPKNn6ByzAaq6n894NUCacDz+WXoqH4M1yGxfgFXSuPWPvRG+Rufo+/dFPgtSNZK4CZSz6grqKUxxa8zTsff8k9x6Vw6YIaHl/4NrddMXzbdxCnvfr0YP74P3DdP6chjx9LYb89uvoyAKKBVxn5597lzqNN8mvnKelw/wtXvEfO51OvpOdF/4fQdIyasrbxQLMmEG5ucOaWCYXCiyehaDoIxXVeL3tsNJtmjkeLjtkwmmsJ5+X/pGnhxroAX702h+ZN3/HNqi+4+6rT2JDTRCAlyOAMk3Fn/AboCLT4n6WUfPnhO8y5sogRc97n4T+9Ra+UMJubUzihuJX3/vXsT5b9ai870oqamoWe15uKuTdjh1ucQtQnx7jbkdK2UNPzUPzp6Dm93FJvoSg01CRHCyX161L7AdAxhsWPN4M2hsX4BaDoXpdfm54Zh+rPcBkW4xds/9bW1x+8Su+aj7hxRGeNrNuveB7FgqjnFr/P+AtP6PQcKWUHni3+zwruOymb0595k+t+n0WWEmbMgR6eW/YBV484psvZL01TefSqY7lsyhSyrnwowRNse2VHWlHTctCyi6laeOf28QtAOPyqC1Rt0WsyqY5KBl+/QMWvFuNnOVqtjYBACGefPX5skFC1hPqJ2EAOEQ3ApG0hQ02kxYCVN/Anu0jqa6r49KV/0NvTzL2n7cf811Vq1jTz0crveH7p/1h4SQE3v7mC0cOd+ov2QIv/GWD4IIU98730TjVoaQrx5Kl+wja89r3F/16ewT5HnMKyZ//eLTVgW1JndXhSEh09BDLSStElDzv/twy3WLhy/m3knXw9qFp0YG1UQnFNIZNKaldXfGlE/Py/GMOgjV8xywTHJJXo750d2xi/rJZ6NE13GdZVfn393isMaP6ccWf95ke/thiPHl/4Nq+9/zmpwuKlNz9y+RV/ToxZ7Xk2fJDC69+2kO6xOaIoRKZXMHo/nXmrgzy+8G3GnnN0lzNqqqpw38WH8/+euYvfj7kbr2/rUxbjGdbGr3SE2DF+6WnZyexXF5QMvn6Bau+y7s3rgxkJU/bkGEDi1unFze0yG6vZMPVC5/8Nle59CaG4RartHyPmKL2zgrCm+lo+WfgofXwtPDLyQPKz091V3xNn53HiPz+gZ4rFe+taOLq3xvHjHmb/AaB0AAAgAElEQVTe365yfz928QqGH7E/L7/1MbccrnL38g8RQjD9jDROm17C2qoQl+2vk+UX9MoQPH9OKpe+1MKzN1+Mxw7x6PUjuHbqwp0SgLV/z2I1LOCYFcYG/TpDdHG6tJTEjiQhFLdbSE3Nwm6p6/bnmVRSP4diDIvnlwTKowwDXH45PJPOuC7AbKhw7yfGL6GoqL4UjOZa9/63d4TN+i//R7+GTxl35o8PvOL5dd7sD8jWwhgC0r2tHD/uYd6YdiNSSvecq17+GMOyufNwlb+9/THDj9iff735EamyhY01TrYr2wdZfoGuwAXDdJ5d9gGtIYOS9Wt5/MW3ue3y7d+G7JWfzcMXHcD4Z+/h2LH3bfXc+PesjV9Rta8Fj+dXvN1HlGHumKGktlvJ4GsX0bYqd4WiUjDyb1QveigunS8w68oxAqUgoPD8e906iZh2hp1EU30tT028jEP2KmbalUeTk5HKtz9Ucuhl93PGUftxUHYzqXoKaarJg3/wcfNbDTRFBKqwGT9lPsMHKQzI0Tkou5nxU+ZzVHGEvukWh+UbrA7AqOcb8CgWhiVZsMbkiU8M/Dqk6ApVLTaIBiYel87EZT/w2sypnHPtnd3+GqFjPUtMAtoAFftXUd0guGqh83xiXY7ERhIJkjPRkvpVaqv8ci4YtIw8pGWiZRYSy9238UtghVq7zLCazaVUvz+Pu68
+rkvPt33pw7c/VHLS9VNdfvXNSue3BRH+W2LxxKl+zp7fSnFGiMdffJtUr8dl2IHZjXwdkPRN93BUzwg3PDyX8qpaClIhbNq88JXBox9F0BTI9gtqg5KwBS++8T5TT/Aw8fX/cfU5XduGLMrL5MJD8nntjRfY9/jztnjeNvnVloAEaOPXgjvcY/EMsxSVuuYkw7ZXyeBrF5ZQFNp3qwrVcUsXmoYQCkLR8Of3wRlft/PVUFPNA1ecSK4eYlBmT0bfNYP65la++LaUgjRY9NZHzDnLy+PvVnDhvjpF6YIMjyRs2vyuj8Yr33zPPb8tZvX6Tezfw+LJJWspyYJPSzSuPEDn2U+DpOiCe4/1ctMbIXQhGZCtsq7Wpi5okekTnLaHynF9JBN+5+WRZfMZsMcQDjhxZLe/1vb1LCunjUXqfjbNHI8dbGpLy1umY0woJUJR6HnVUzhfLJsILJ5E4cWTMevKAUmfQXsDSY+1pH596pBMifGr3S8U3YNthKPTHxx+edK6nr22TJNPFkzh2T8eia5tP/8C9c2ccN0U/HYLx1w9icw0fwd+lVbWkuGBkwZpvLHOoE+m4NBilWf+9Rb5ORlMPc5myaf1vLAqjCIk45aaTDjcz2MfrCNVF1x9sMOv5rDDr7U1FvUhQa5fUNFsc2J/OGaAh+M2BLuc/QIYfuhglj/5JjUVvyO3sLjTc7bJr1hBvW05DRIxfv1xuvs3izEsb/gEFM2DwN6hRojdUcng6xeumEuxZZrYRsQx8IyyKuaxYhsRZ58+ugKpmNVWcG611qMoKtKMoHZixNedskyTmbdfQYrdxNNn5XHlKx+QqRlUNkbISxWETMmZewmaDXhyRZAFI/xcuzRESYPNg3/wcfXSEKcNVpn7STWj99WY8YXBoByFjfU2qR7J+6UWioBz99ZZVWnTM02wrl5y1zE6978fIdAiKUpTOGeIhlexGHuwj3993ULpe/PxeL0MPfrMnfr6Y1YfMtTkJLxsy+kaEiLBjBAEVQvvwg42YgcbqZxzE9JyXLyb0jJdI8qkktrVFc8vIIFhIs7U1DYi6LnFCKGy6Zm22eUxfglpO6afXWDYN/9bxpgj+pCe6uvSc3584Ts01tXw+z19vLehjrL6mgR+aarg6Om19EiBLL9KRbPNP0/zM+H1EJqQ+MxG0jUvt66IUJguaI3Axnqbt9ZHULfIL4ktBX8+XOf2t8Oct7eGqgguP8DDhYu6nv0CuO383zJ25hMcP/be7Tq/Pb9URWCZJormwbYttzZP0TxUzPkr0gghLRM72Ehg8SSEqiEUjeJrpnXpee6uSgZfv3AV9h5ARel6LNN0PtxCcTvkHKM7Gf3QKwSWTnGK7BUVFI384TdQMe9WsvPyqQtUYVkWFaXrE5yPu0umEeGNxyfSUrWBqw/xollBdCvErUd5ufs/BlLCujrJv742mb3SYI9chbv+E6YxAsUZCt/U2PRMg8VrTcDk6U/DpHgEC871M3JhkIuHadz9bphUXXDxfhrXLA1zWG8NhMVH5RZH9VV5c73k7CEaA3MUalolBWlw4TCdf375PaWbJ9HS2MAhp4/e5mvZUcWCpvXTryUtLZ3ShXeip+UQaaqJ8ydyiurtcAuFF0/CbKh0h20rQlAVHY2SVFK/BsXzC0hgWIxfIJxie8tybHQUFeHxk/OHP1K98E6yo01CdYGq7X7cpvpamle9zonXnrDtk+MUqG9m7msfMGqYzvJ1Ie452sNd74Yhjl9zVhn0zVQImpJWQ1KUJjiwSCXHDyUNkjVBkyOfMRP4le2VzPwislV+fVVl822NzQXDdDK9gs0NBnvk6RzXu5X/e/51viut7FIBfm5mKqftlcJnHy5n79/+YZvnt+dX8/JHXC/JGMOkbSOljR0JUnTJw04xfrRzVQhBxawJXXq/d2clg69foDrrpBO+dLTMAgpGtRVRlv/jcoSiuh124KBM8fixQ81IFPS0HNet2JvXx5311Z0yImHenX4HfZUA61WDMQek8djHQYYPVtm3QHDO3joflprk+FXqghabWwQ3H+HluldD/O04L49+FOGlrw2OHaDzxnoTjyr5LiAZMUSnV4bCRcN0/v2NSbZPcNIgjctfDnHSQI3/lFhMOcnHNUtD3Hu0lxfXGDy/0mDmlwbVLRKvBnmpGuGQTZoS4rWZU5C2waFnjun29yCm1dMnEIp+SShCwWiuRUqb3JOvA6EQWOwAzi1ZtS0q596MHQkiAKuljs+mjMFubUi2bSe1y6p9J53wOXYQem4v8s+9C3DsJayWOtTUTEAgY1MiALOmFJAuv8DZFrMsq/1DdapP/v0UD4w8ZLuGT8fr8YXv4JVhPIrKcf1VDuqpcs4QnQ/L2vhV2Sq46xgv1y4NccsRHh5fEeHjcot1dZJBOSo/1Ns0hBP5tWKTRdiyO+XXtUtD3Hm0l0XfhjBtaAxLpn9mIIGiDI3msE3dF+8ytMjH8dc+3CUj1ov+sC9LH3l9u4IvSOQXxDHMtsg95QYCiydFp6MAAsyGKrAtqhbcgYwEsVob+GzKGIQQ2MHGJMO2omTw9QtU+w/rLZcOJ+XEG4mEQs5gZqBy/i3ExmvEd9HFAi872EjF3Jvx94h200UH2sb740D3jH9YsegZxhzWg2sfepXLhznP7831Bg8e5yUvRXDxvhpLvjNpidh8Xwd/OkinX5bChfvqrK6yOay3ytfVFh+XW0w50cfIBa1k+AQ3/FanOSIZc6DOrJUGQUPywmoDryZoCEtO31Pjt8Uqxw/QePsHk5H76KR7BCP30Xn84wjPrzKwFAVVWNx5lJcbX2tm7X9eou/+R+6wEeG21FlR8OrpE6hccAfejB7YwSashipkXBpfGiGKRk9BCOH6GkWqNtD8zuM75TkmldTOVjzDXH6FQ2jZxS7DrJZ6QLQ1nESlePxYCCrn305KQV/3uOpLoWLerQn8go4MM02DtFAFfQq2PHS6M8WyXiP31HhjXYQH/+Alxy8YvZ/GK9+ZhIw2fvXNdPi1ptrm9300bn3LqUV94lQfZ81vIVu08etPB+vMnWEQaJXM+8rAryfy6w8DNN7ZaHLmXhpvbzCZc04KT6yIMHuVQVmjRYrPQ4Ynwi2/17n6la4ZsQoh2CdfpbLkewr6DNrm+Vtqalhxz1nUL3vE5Re2hTRNQIKiIiMhii6d6vJLmhHsxkqalz/Spb/B7qRk8PUTa0sdJjGAtP9dzBHaG13xGYEfALCDTRScdzdajgOzWP3E5uduQKiOG7Sm6W4qOT6l3GHA6o/Q5o3f0iu8gZXfhfESYf5qyRMrQgwfrFKc6TynHL/CGXtqzPgiTK5fcMaeGkLA1b/RGbkgxF9/52HxdyaX7a9zYJFKtl9w1hCd3pkKVS2S5ohkxN46C9dE8GuCU/fQmPeVyasX+rEkXLSvxhlzgwRNiWnD058b6AoMyFYobTS4cJjGoByFS/fXeX5NCZ/Pe5A/3Pg4utf7o157Z3PUjObaDnUpQ8dM4tMHRrLvuCdYPX0CdUsnOwWt9RXOnE3LdFyjAWyLSKCEbfe3JpXUz6
MtMayptpr0nB4djtdWbXb4JUkcjg0UnHc3eo9+INr8vjY/d0OUZyKh/nHomEnbxa8v31jAxUduO9Bor5lLPsBLhJlfmg6/MhQsCVk+hdP20Ji9so1fShy/bv69h5krI4z7jc7AHIU0XXDy3k7Wq7rVuY7PHqLzr68NIqbkzD015q1u49fF++mcMbeVupBEEXDczBb8mqB/tsL6OhswuO4QD4V+gysO9DCzi0as155+EONmz6Hg8tsTjm8vv8BpgmjPL6uhEiwTVB1pm868zRi/bBOPt2u1drubksHXT6z2HSYxxS6C2O9WT5+AFWrFRgEhqXplElJKFG8qhbGtxy6m1LtbUkq+emkac8efwFk3PY6p+NhQ3YRHU5i/2uSF1aZ7rh3tjrn2EJ1UjyDHL+iZLhi+p8Z7pSapHsHpe2o0RWwaw7DoW5PF35nYEmpaJVk+QUsENCF59nOnZmz2KpNL9xeoQjBib50Xv45g2bBPviBiC+48yssl/w5y0kAVjwJXHKCzYE2QYl+ETxY9zWHnXv2jXn9n6fRbLh3eoaZu9fQJ2LbFJw+cjzTCbZlKy0TQZkmhaB5QVPSsQuzGSkySSuqXpy0x7NMHRnbgFziNcVWvTAIpER4/hRc80HajncCwhnWfccRpx3T5du989h2m4qO6pZG5X9nb5FdhmuC0PZ2se36qwhl76dSHbAJBWPydyZK1bfySQP8sQUUzzFpp0D9b4flVJpftL1AFnLmXxpK1JhFLIm0YkKNwzzFeRi0MogqLEwb4yPE7uwjzV4e6lP3KSPWTYtRiRMLonrYF547wCxn1MVRUpBXdfnQ7wIRrqROp2kBh7wGs3/63f7dTMvj6hcoKtdLz0ikEq0qQSCeVa9tUzBzvFm3HqzPj1Z2tdZ+9y8jf9kVVFRZNGsdZNz2G12rB9mfTLyVMQ0sIkKypMknRBT4N15cr5sBvSwibkov30wm0Ss5fGOTyA3WuPNBDTaskVXdgBdAQlny22cSSMPEILzcuC7F0rUF9CDJ9ghYDDAvW1tr88WAvA3MULthH5/MKiwE5Crl+wcihGrP+9yWHHJ5OONiK15+yU96b+C+fSFMtaopjQmhZJr2vfZ6KuTcTWOIMprWDjU5GQFHd6QVJJbUrK8YvgNaKdXjyByClzebnbkRKu1NEJTKMHQrM6muqGJy7Y19rMYbpZjOmVOiR4hhdfV1tkaJ15Bc4DAuakov31alplYx6MchlBzj8qg9JbBtmrTL4LmBx33E+5q4ymL3K4JYjvFz7aohXvjXwqILqVsmALEF5EzRbkj/01zioSOWsIRrr62xSdMj1O4bNF+yj8WwXs1/79M6mqnQ9xQOHbNf5W+KX4s9ARloRnhSXXyiqy7DYlnJS21Yy+NoFJITACJQikVgttVTMHB+tl3DUtgJx7CaslnoQAqkoHdLK3TnLsfTTN7l19H4E6ps59+anWLexhPkjUhi5oAZvhsaTw72cM78FBchPFUw6wUeaRyAE/GlxENuGhSP9jFkU4q0NFnNWGfTKUHj84wj//NTpkFQE5KYI8lMEtx/lZcEag6sP9jAwW2HUMJ011Rb7Fah8UGZxRG+dRd8ZSCk4aaBGfUhy0iCNi18K8s9PI3g0QapHoNkWBXqQVW8u5ODhl3Tb+xGv+C+fYFUJamY+INj01BhsM0L+iDsw6zYBEFg6xc14Cc3jrCyTSurXIonLL7uljs3P3diW8SWRX+AwzDHvNLrMr+/+u4TxRwze6jntFTNUvfLMI/n0q7VMO9lZ2N1+cgpjXm5FSEl+qrLd/HpihcOv6EtHAQblKGT5BKaUnLmXxtH9VM7aS2NNtcURfTTmrTb4+wk+5n1lMHeVwZF9VapbJOfurXPJS0GOnNFCjg8yfApSCryq2mGe5NZ01mGDuOvdD7Y7+NoSvxxbHIuCOH5p2T2pmH2Tw7AES52ktqZk8LULSMssQPd4MSNhFH8GecMnUDF3IkiJWVtOzeuPOdtZOEWsQlGR0kJTRLfWd8Wrqb6WfilB0lN9/HP26/xQUsJZe2qk6pJzh+qsrbHI9cNpe+osWB3h7CE6fbMUGkISW8JveqqsqrKoC8Lfj/dRE5Rc+2qIO4/ycu0yxy8nZMI5e+tcfoAHRcDTn0XIS1EYvb+OpgiOH6Axe6VBrl8QNp0gTVcEv+vj1GpgQ/9shXP31vl0k8n6ekl9yMbnUflm42YyWdXt70usjsJoro3zN5IkLPWlRES3GIVQEKpG5dybAdr+dqpOcd/+3f78kkrqJ5cQ+PMdd3Q1JZPcU24ARXU+81JSOfdmpBl2T3cZZltd51d9KUP6HdSlm8TmMI59YCbn7KUxIFvhgn113lpvbJFfhxSrrKq0qO2EX9ctC9EUluSmCOpDknP30RlzgIcf6m2WrrWYc44fXRVceZCHU2a38vnmCEPyVd7eYDJiiNPZbdpQF5LsX6gyapjOi2sM6kOSVtNGIuiZl8E7n3233cFXUW4G4YqV2zxvW/yKjYKK55fZUOkyTMRKKmwLs6CoWxf7vzYlg69fuGR09pkRaYOTtA2kZVK18E53C7Jg5D3OLxUNPasAo7ac6oV37rRW32/fW8QNxwwhUN/M/Nc+IM8PI4dqSAmn76Hxl+Um6+tsxv1G5/kvI8xaaTB7pUFdSOJRocVwthuPn9WCIpztwov21Rmar3DZ/joryi1WVtm89I3JwjUmKbokaMA1h3gpTFUQAupSBacOVvnPDzaTT/Tyt/cinLWXxnNfGry1oRUpnQDOH82En7mXTkmLl2OPOZrxF57Abc/9h6b6GtKzcrvtfYm917dcOpzifoMp+f5rULRoRsvocL6e24uC8++ncs5NqEKw77gnur0pIqmkfi6ZkTDQxi93a9E2wW5jmMsvHC8wLbOAsn9c3qX5s1JKIk01XXp+sVmNtxyTxuXzK7jqoBQMG67YX+fCl0JMPdG7RX4ZluSYGS0UpAkCLZJRw3QG5Spcd6iH/5aY7Fug8vRnBi9+bbJ0rYltS87Yy+n01gTsmatw8iCN19aZPHqyj5ELWzmqn8YZe2qMeKEVrwqmBFUBTQiy/IKz982iR4YPig/a7sALQFEU0lO2Pmgbts0vgUjYMdZyeiJNk6KLJ1H22GgOnjgf6P7Grl+juiX4EkKcBEwFVGC6lPKBdr/3AjOBg4AaYKSUcmN3PPaups46TGLHoa3wPrbywDKc9G50RWEHG6lacCd6bjFFF08iEiihZslk9Ly+GLVlUZdoZ6yQnpaz06bMNwU2U5w3mOn/fo8M2czxe+v0z1aoDUoG5SicuZfOom9Nvq62yPJBfdDmtYtSmb/GYEOdxSvfWhxUpNAYgTSPZH0dTDjMQ68MhT8epDN7lcHveyu8tcGiZ7pTzPriGpMFqw1e+sZA4NSA1bY60OudqXBAocJL35hoKhzbX+GNdRYDshWaI5Ke6YKFqw2ksFCiK8bD9y7mjdUfM+R3J++U9yheQiidVuLFjCat5jpM22LFfSMQCMad9lsyc/OS89F+IiUZtv3aEsM0RXTglxVdcBj1zrDsmBu61
VKPnj+AwlH3semZcXjy+yNtO8qw3gAIoTJgzKNdGlNTlNW1DuaZSz5g+CCFN1bXctEwHVWBDK8gN0UwfA+NyxcFyUuBTI8ky69wwiCV+qDkpW9MBmULTCk4pp/KsnUW5wzRGZCtcPG+Gi9/a7Lse5PDews+KpcsGpXChS8GWbLW5J2NLcSmHVU02Zw1REcApwzSGfFCK4oQeDWwJPRMVwibkh6pCqqQPPm/egb0zKZn5fZnvWIKtrZgWxbKDkw6cfnVvgwvWmNsG2GkbbLivhHETkvOeNy6fnTwJZyhgY8BxwNlwAohxCIp5Zq4064A6qSUg4QQ5wMPAt0/bG8X0PZ+CO8fN4rm5Y9QU7UZLT3PPa5m5INtknvy9Z3ezqzd5BS02haRplp30Gl3XwBZooWIYfHyWx8jTYt5X0nmrDKobJHuqDbDlvTJUGiKQGGawuHPtJDmgaABfbIEn1fa/ONUH39+I8zZQzSy/Qphy7GmOG9vnac+jeDX4Zj+Gv8tsagLSXwapHkcAniioDx3b53+WQqj9tH5rMJmSK7grY0meSkKdx7t5YH/Rmg1JB4NLNXLs7dfBsARQ3uz8OV13faebElCKNH2awtpW5T/4wq3lV7xpwMCaVsUjLwHEfU8kraJqmmUzLmZa049mKwehQn3mQRa9ynJsK5pez53W+KXntOLnOPHUjn/lrau7XhZZtRqBaRtsXLaWIzm2p2SwY9lvWaemcIls01qg5JnvjDcRiCn3F7i14UzHDtL8kGpgSJwx57tlSdYsMbgqgM95KUKbBsyvAqnDNZ4Y53Jm+stLtnfCa5yUwSV1RJbStI8AsuWeDTByKE6ioDxh+m88p3BaXtozP3KKae4+xin/uz+47xMeD1EmkdQmJ/nMqwr2rc4lYbaarLbsWRbiueX2RhAWiblj48mnl9CdbaJiy78O9DGL8s0KZl/mxuIxZTkV/dkvg4BvpdSrgcQQswDzgDiwXUGcGf0/wuBaUIIIdtPhU4K2LKPjuJJIffEa6j69/1gmRiBUmqWTsVqrnWGONsWEsvJkikaij+dvOETUDWN0nm3divAVFVh5pIPOKo4wh/3SyXbJ9EUePSjCM+vjHBYb43PNlvccKiHW98O8+fDPYx7NUR90IHQ4b1UVmyyWV1tk6rD3K9M5q5qdlaEEmycZqeB2SpvbTBpjkhCpnSKTTWoD0p01WnRzvE7wVhxhsKBhQoL1hhoiuC8oRr7FzodQ++XmGT6VNYEIjy+8G2+WFvG5BvO65b3ojPFsgPtv3w8eX2JBEpcHzbTNBCqilAUapZNo+gSx9dImhF0jxclJRNN0zu09ieH1narkgz7CSUUxclyBUoJvDoVO9joBlxtJwnU1CyyTxmPtE1Kd0IJRSzrFWptZMaZfvJTBI98HOGDUpP+WQr/K7O44ygfd7wTxrIlqR7wa87Mw4m/93L9shCKEEgpePwTg8c/aWsSsoGIKREKLF9v8daGIE1hG9MGr6pQ3eJsX56xl0a61yniz0tROG+ozjOfR/CogjP2dLYoL9hX5/1Si7OG6Cxba7Ju/cYuO90DaKpCeNunAVvjVx/sSCsIFRXb5ReAUDRq3niCwlH3ufwyIo61TpJfHdUdwVcxUBr3cxlw6JbOkVKaQogGIBcItL8zIcRVwFUAF024lyNPH9UNT3HXUryPjvb91+7xyvm3UTF3ItK2CSyZjNC8SDOMkpqNntPL7RoSqo5RW4ZQNfScYnSPd6dsQb7z2Xd88U0LT32Y6EgVMiUlDYbjAB2wGTVM56sqmz1yFL6vtTlhoMpXVZLJJ/i4blmIB47zccPrIWpbJV4gRYfGMJy7t8aNh3mpaJLc/k6YUwYrqAIK0xVaDcn7JRaLv3P8wNI8wvXUCVuQ6ZV8WGYx9mA4b6jGi2sMyptswibMX/YB2T6Ys+xD8G5f909XFfuSuHr4IfS+bCrlsydiR1odcAF5p453okspEVGTwtjooaR+cnUbw5L8chRjmPb9187n24yAEC7DAKoXPYgdbHL5BU7no8svRUXP6+18ke8kfpVVhHhoeX3C8ZApeU9YjBqmMzRfpShNkOFViViQ4xMcM0Aj2yfI8AruPtrLX5aHKGmwyfYLyhol2X5BS8TJ0p+/j4fLD9AxbBi1sJUxB3poCEu8Knxc7vDrhdUmeSnCCdokGLYzbOmjcovhe2hO/dm/HDPXxz62KUwX1NZ1zem+q2rPL8BlmNnofORzR96DO5tT0ZCWQc2ypKP99qo7gq/OzFjarwa35xznoJRPAU8B/PPd9clVZVSe/P5oGXkUjZ5CJFCCntOLzTOuQ9N0pO6nYuaNSDs690xRsVvq0HJ2btvvoknjmDz7dZa/+TZDsg3uOdbLxOUh5qwySPMKjuuv8fCHER492cvYJWGmnezj3IVBAE7bU+PgYpVj+2u8tdFk5FCdGV9ECJtQZzpdii+uMXmvxKay2ebsIRqfb7a591gvt74VZvbZPkYM0ZHADctC/N8JXiYuD/O73iovrDbom+V0PM5ZZXDdoR7OGqLzQalJcwQMO8jhfTN49b1P6fO7fjv1PRLSdsY6NdVSMPJewPnS0XKKUYQgUlsWPdP5qFfM+St2xHmPBE6NTATB6ukTEty+k+pWdRvDkvzqKCltUDU8eX0SGZZVSPmTV6J6U6mYNd4x8LStn51fC9cYKIrgvL11LFtS2SIRwGOn+DlvYSsnDHCK5C/YR3dqw/bUWb7OYFCOwmvrLOqCkr5ZCqUNNsu+N3l+lYFhSYrSFU7dQ+XaV8PMPNPHuUMdb7AbloVYfEEKpQ02d7wT5rBeGu+VWJg2vLbO5OYjvJyxl8aKzRYX7avzYZnF2Xt7eK6LXl8bKpvo5dt20X28YvwCOjAspXAAkXAIs24T0nYW4FJKl2HOtm3b9rHqS0kyLE7dEXyVAb3jfu4FbNrCOWVCCA3IBGq74bF/1aooXU+sgEraNlZzHZtmXA/RIMtqqcfCGdHhyFmFaDnFVMy+ifwRdyCljHYcda9qm0KYpsVrH33NNxVB7jvaj23DkDwFRYHzh+p8utnitD00Xv3eWcEVZShcMExn4RqDaw/1cMlLQa471MPol4JEbEmax+kiyvHB/T0GJbcAACAASURBVMd5+fMbYe471svNb4XxaYLhe6p8UGoyfE+NgjSFZsPGsmH/QpUTZ7XSJ0vh+1oTG1hXK3n0FGd496T/hVGEwLAkigI9UhQ++qGVIwdmsPzrVRzV7e9Om1RVjQ7Xtqhe9CDgBFRWYxUis8DZYrUiCITz92ypT+j8QlERCOqWTt6Jz3K3V5JhO0vSdirHVa1ThknbIvu4MQlhbDy/bKP72RWv9vw6pFhl7iqDUXtrDMpVmPWlQWGa4Le9NPLTBBcM03m3xOLdjSb5ac6os8N6qTz5SYTmiIVPA9OGe47xcvObYf4x3Mezn0fcYO3VtRanDnb4pQcd24pj+mscOaOFloikOEPh8woLCdx/nIfrl4X4xycRZ6FmSbyawJaSF87zMe+r5i5lv4JKCqnpmV16f3r1H0zZhrVIoXRgWChQ
hm3bSGm7vm1WU43znRRlmJ7XFyPwAymFA9wgLilH3RF8rQAGCyH6A+XA+cAF7c5ZBIwG/geMAN5K1kpsW5Zl4cntRcSdhyYpGj3F2aoSEKneSM2rjyBUHX9+H4xI2O0qEkKg6E7nz84w7fQXDKCpNczv9xvIXlopg3KcTsdHP46Q5RX849MIUoJHBZ8mmHmmn+oWyfEDnE6gpz8zqGmVLF3rDJTdUG/TFHYCsBy/U781cqjGuFeDnDNEZ8Eak0wfVLVIVCGY+mHba7KlJNXjPMaF/wrSajizIPtnKZw9ROP5lQaaAtkpKqm6pHemk3H7pDJEbe06mhvqSMvM7vb3CCA9pwcDxjxK+ca1ePMcr6PSZ69PcPEWCPQefVHTHcsLPc8ZJmxUb8QpxjeINNWyctpY9353VhHybqokw3aWhNI2Tqsdw6RlUDFrAp78ARiBUrSsgp+MXzG159eMLwx0FZ770uCF1QZB02nqueFQx/T02H4al/w7SJ8MaAoLZq8yuGCYzsihOq9+b5DlExSnC8cEeh+duasM3v3BJGjCnK8iCfyK//CETUmGT/DM6X7OnNfKiQNV9shVOHMvjUXfmoRN8OsCvwZ79dDJS1W4cJjOM13Ifhlm1weWTZw2l1suHd4pw2Kd2kIoznxOZALDjOqNTi2ytAlWlSQwLMmvbgi+ovUP44DXcNq0n5FSrhZC3A18IqVcBDwNzBJCfI+zWjz/xz7u7iLN4yUC7tBZIQQS6UCtEwmhYNaWYzXXsvk5Z6UhLRNvWibQfRDL6z+U5V98yItvf0Zro81/S0OU1hv4dcElw3T+W2bx+14qj6yIcP4+Ggf2VNjUKPGlCY7r72wNTj/N7xgSRiSaAK8K007xc+97YZBwxl46z37hjOMoTne6i4SAqSd7ue3tMGFLct9xPsYtDTHmAJ09c5X/z96Zh0dVnv3/85xlksm+kYRVQHBfq6W2b/fal4q41n1BtLS1ikLlV627trZaW1QU61JawQ1BrZYCamtb31p9W7W4on1Fw5aEZDLZ95lzzvP745lzZiYLSUyIiM/nurhIzpyZMzMw37mf+7nv783Xppisfsfjkhk2HQ585/AQz1e4fG68wZ8rXKQ0qGlXQtjSDSd+tphX1j/G18/8wcAveoRwWqLBCtJHGBZuaz1Wbgm1q65BdncEqXyFxBMmY45VXa6madL23O2j9pz3ZLSG7TpMy8LzVNdgLw3rg+HqV7S5c0jPz9evlyq7qGxSnYxZtmB6scHMvS021rkUhUWgX1PKVUCk9CvEJc92seJNVS6RbcMvj87kphdjtMUk5x9u8d8PdRJzYWKewdYU/br6r0q/fn1smIvXdeJKOPtgm5IswYn7qa/l8XmCb+9v80aNx/4lBq6UPF/hUtkiOfL+FgAyEINyuq9taCGWPbQux53RU8P8Lu2khl2N19macrvqpPI1TOvXCPl8SSnXA+t7HLsu5ecu4NSRuNanAb/TpDEawc4poru5Lrhtx4qF/u4i0nVwO1qoXXU1RjgvOMdtbwTPw0yIXbyrlXBOeETdhicdOIO/LH+KSWVFVAuPxtZOXC/Ot/e3eHG7yz2zMrlofRcZJqx73+GFLS7tMUlbDOzEQviIcSYXHhli8cvdlOeq2YuHlJucsK/NQ2/FOfdQm/MOtbnntRjXfSWD767pxDLh9RqXLBsm5Ru8Vu1SkiX45t4WG+s8Ttnf5m+bXcK2IGRBtEPy9Skmf/7Q4YR9bdZtMSEjjJSShrZu1vwnTk7DS6MafAnTomT2IgDqn12KdJRHDgLc9gZob6DsjJ9j5o0JvrCcploa19/G+MnJ0Sl6aO3IoTVsZEnVMClBOt3KqNPzAg2TSNyOZnYsX4Db3hho2EfVLyEERm7pkJ6nr1+O6wHNHFQqqG0n0K8PGlxCpuCFLS5tMUlHHGwDSsIGy16PM+cQm6f+E8cQgsJMOLDMZPa+Fo+/6zDnUJuvTzFZt0nyk69lcMHTSr/+r97jxP0snnjPYWyOYHKBYEcbHD3VItohueDwEJc800VnXNW+HlJm8OS7cfYrMTnpgBB/+MCgKGGYasGgnO4/rG4ku3jskN6bneFrWJp+QVLDOgTjvnt/MuC2QsSj29M07NOuX9rhfjeiP4sJPAcMi7LTfoJVND7lBkntI1eAEEw8fwnxmBK4HSsWBm7psGvchu2MDNpDRfzu2rkAfP6Cn+EBD76p0vAxDw4qNXg36vLcOVl0uZBjw2eXtRN3YZ8Skzv+GeMLE00ybcGNX8ng5//oZnOjx5cmmZz9VCcPvRUj2xaUZBnkZwqyQoKxOYLfvh5n/xKTrc0u21sk3z0iREmWcl7OSxT6/9fv2sm2BfWdkgxLWVZcPWsiG59o4/FfLmRTVT0PbS3hsG+OzvepaZqB6aSRmUPi2wfpdFN+7mKEZROvV8X39WtvU0XKJB3B+8sUaDS7E31qmBBgmJSeeiNImaZhNQ8tYuzcJexYsZDys28dtn7F7VwqI41MKB1cGcGaxfMB+Olv1/Lg03/m1SqPcw9N168XzsuiPQ6OlBz7SAemIfjKZJO3Ix7VLR7NXXDXrAx+/mJCvyaanPNUJyveiJFpC4oyBZPyDcIhwbgcwd2vxjhmmkVplmDZhhh1HXD+YTZjsoQaiwYcMdYMNCzaITmg1OLXs7PJLyrhzRalYUOxmVj54vvsf+7QvcFS6UvDAv0ybRAEGhZZdQ2Q1C+R0DNNEh187UakWkwAwR67X6goTBvDzkjzUJGei9fZkqgjkiAEblsD7ij8Z5/65VOY8b3zCBseXZ0dHFBiEO2UfP+IEFMLlWfNC1tdMixBdki1UX+m3KCyFX49K5NLn+miscsjx4a9CpQr/rpNDld9McRx+1j88f/itMYkT54W5jevxykKC44YZ1LXIbn2yyEWPNcFEp7YGGfZhhidccgOQV5IsP8Yg3uODfOrl7v562aHh7+dA/FOZk8zWLH2Jd6Owozv/XKXvTf+l1BjNELVlk0ptwiEaSEMGzO/NEjX+7GVEIZaRXouTkMVmBZ4LsLUH1XN7k9/GrblrjlklU+lM7It0DBQ21Hx+krctgZqHv5RUr8+Yofv5M99i9+//BiXnvjZAc+NNrUx96fLcRyPyuoavjXNZv2mOOccYrNPcVK/QqbAtkAg+NwEk73yDV6r9rh3dianPd7BsdMt7nstxjf3tlj7vsONX8vgtAOVSbSMSx46M8zqjXGKMgVf2sui04nzz0qXB04Mc/4fOgkZ8OR7Dg+8EafL6VvD9iqwyM8UaRo2WIf7WNyhwcsiYwidjqlBdKqGmb6nV0LD0ur5/EBLKB+3VP3CMDGMoTvr78loRd+N8Vca8bYGpOch3TixSAVIiCf+s7vtTVgF5ZQcsyDIjgghqFl5ZWBkt6uGm77/yt9obmoknKVasY+aYOF4kimFRloW6ksPtFOQKYi0S7odyVmH2HgSphUJ/vAfh31KTB5+K84x0y3mPt3JijdjWIYg7sCUIoMn3nV4bpOqJ/tLhcN3Dg8xLk/VZLgezPtMiMYuyWmPd0BM0BaTVLV
KZvymHYCYKzn83hZsu53Swly8dzfw9QuuwrJDu+R9geSX0MZli6hfm+xU7G6pQwgz4XCfQAjl0eY6yESaXgXSYBeUE2+qwcwrxXHi1GyvoHzi1F32vDWakcTXMOm5dNSo7m1fwwCc1npqHv0xVkE5xcdcCiRqwlyHuiduoGLZJUPSr3FT9+N/1jcNfCLKZPXDiq1k2x7FIcmzH7jMPdQmL0PgeEn9+uID7eRnqgyU60le3uZy+sHKJsIwlEFqa0wZPz/9H4dH347hSoFlqEXl4+86rNoYI9MSvFLl8pW9LISAbFtwzLSkhrXFJCev6qCtO13DJBB349z2zximqTRsKOOFXnpnC8X7fX7Q7yGkB9GpGhZvUw2+UpLUMCGSTRGpGpaiX1Z+GUIIrWEp6OBrNybe3Y0EPM9Ndsf5GRIj+U838bzeq0PDMHfpYNPWpgY2/vUJJhdajM9y+LARXql22dHqsXZTm3oOArIswdRCg9tnZnLPqzHWfeBw0n42e+ULkDAm2+C+YzP5/rouTtrf4tQDbH77eozppYKtTZKrv5TBxeu6VDpeSLJsg+8eYdHlCM46yOZHz3fTEZcUhwULj8qgqUtywr4Wc57qoDUG/3dpIb97vZvfvWPwj2VXk5Vpc+6d/8O+X9j18xyBXiv3f99yOnZOEZklE4h1d4EAwwqplnrTwi5OeBslfJEgUYTcWI3X2UrVI1fSUaLqWnZVUK3RjAQ12yvU/3FSFhQ+ApU5ATLyxzCuh4Z1R7dRWFL6kTQso2wam6vrmTKuuN9zok1t/P4v/6I4LLni8zbzn1H1qU++5/DgWyoDlWlBji3Yu0jp1/I34vz29Rj5GcoD7E8fOpywj8Xft7ncPzuTC9d1cex0i0ffiVOarepbf3hUiCuf71ZuGwZ8drzBhmqP27+ViSlU0PWDdV20dEsOH2ty2RcyqGz2+HNFHBB8+4AQRVlmoF9D2Wr0+d0LH/LFi+YP+X4+qRrmL+jbuhwyS5RWGZZaxPoa5uuWXTQBYYUC/RKGlaZhn3b90sHXbowE7KLxmFkFAEGthNNYDUIkvqil2nLsgRdP7wzqr57so87YevWZVUwItfDlyTb5tuCZTQ4HjjGYuXcGc55Wdg+WIWjtltS0Sb71cAe2CSfsazE2R1DXIfnfKpdvTLFwJRxebnDGE504ngraHA8OLDU5rNzkhP0snk4UtZ5+oE1xYgZkU5dQ93uyE9eDorCgLEcw7zNqLEdjF3z2/maKssygK+idesGMc3485Nc7kpiZWVQvX4jjxJOjVRJbi9KJI6wMIquuwcwpCu7jtjUSHjOBMLFdGlRrNCOF67oIw8YqKEMYRlqtl69hIIm1NvTSMGFY5GUkfW2Hol+TPzeL3//vchZ9u//g68F1LzPG7uJLk22OGG9x0v4ujiv5wZE2kXbJqY934HpKv6pblX4VhgWmgFMOsCnLEby03WVzk8fx+ygNO6zc4On/ONgm2IZgSqHggwYv0K/isIHjwqx9LPYpVtM+pITPjjP59uoOynPUBI/iLMG4XIOmLrjvtW6mFtuD7mrsyY5oM2bRpI80THtn+BrmtTcmR0N5LhgWXjyG9Bx2PHhZUC7htjUSyi3SGpaCDr52c6RM7K/bmdQ+egVuexPSVZ0lwjCQrkvZWarl122JBAWOtSuv4qLZM/DiMYRhIgWUnfZTTEv9k5umSfnEqR9pxpaf9cqLdTB7WojisEVls8szHzgcv6/FSfvb/LHCpCg3TENrJ5NzYhxSarJhh8uT7zr8bbNLpMMjbAlO2s/G8eCCw0P8e4fHvsWCF7e5XPOlDG59OUZVi+pgfPYDh5Cphtiu2qiG37bGJHEXphcbHFRq8OhbcQxh8JXl7WRagjtmZrLozzEcKxvLNPjtM69x3MU/HfJg2YHo64uhMRpJq1nZuGwRblcHnudScPSFALiOQ/1zd1O/7jbctoag28vrbMHMHUPJrIUIwyCzZALVyxdy4LzFeiaa5hOHlMokuvbRKwDVzSjdhMmqqwbKW4XjcZoj+Kn9yKprqJceF848LE2/QNlX7Ey/yidN5bmnO2nv7CY7nNHrdj/rZXZ3M+fQbGKuy/mHWlzyTBfCyKA8x+Osg0Os3awClsk5MT4/3uSYaRYnre7gTx86PPZOHIkkZCoNc1M0bEIu/KvK4/HjwlyyvosFnwvx7AcOX59q8q9Kl79tcXn4zXigX4VhQUmWoKbNoyzb4L06F9sU3HVMJgue7aJLhMnMsAbV1diT6x97hQNPvWrA83pqWGM00suVfuOyRXRFI+o9PkM53UfW3kF0zS8RhhF0rArDRBgmJbMXYVihQL9SGyg0OvjarfDbs33qIzswswowwnmUnnI9hp3BjhULKT72MuqfWaIGabdElaglRnMY4TzKzrgJM7uASfPuZvsDCyg5dhHRdYuDOY+g0vofFT/r9dUpNiFT0tAhOe1Amz9XuJy8qgOE4MBp43j+7suYeekS3q/YwvVftijLFswYLzn3EJuFz3Zy1ASL8lxBzIUpBQaz97F47oM4x0yzOLTc5Ph9LdZvcjj1QJsT9rV58l01tqixU1KQKehyYO9CgwdOCLO1yeOvm13Wn5XFvD92clCpybRig7MOy6Zo/y8SC+XRMP4r7P3Zrw/736knPYuMQRUap9Z6uV0djJt7B5UrkvUTjhPHbVf1KSqgbknU9jkYTjfRtb/CbW8ilFuEmZk14s9boxlpUjXMt5kwwrnYxRMoP+sWAHYsXwimiYx343W2EF17m8r6ug5GOI/yM3+OmV1Iyez/R3Td4kC/QqVTgmajgfTrkBMu5Oer7uVnc7/a67bUrFdJtsGmqIMh4PByk68+0MrYXINIu+TAaeMwTZNNFVu44ctq5M95h4Y49xBVGvFWjcsR4y1KstVcWV/DVrwR48yDbPYvMZi1j8XLlS4n7Gvz4Btx8jJUB3ZJVlK/Hjo5k+1Nkh+s7+LRk8Nc90IX04tMJhcYnH1oBu+6Y3nq1ouH/G/x1w0fYE79AgXFA9tvDNTsBUrDys+4iabn76V+7W24roNM1B0D4DnJn4Ho2sV4na1av/pBB1+7ET3T5xfNnsGY46/ALpkYjNlwWqJE19yK19lC6ek3gesk0vnK/KvmoUWqe6i9ie2/uxS3vTExwLaFeFMNrmEGe/UflU2vv8S2re28sc3l9pc8/NWqQJKXaTCtyMTOUnUAMz+3P4eEaxBCsvSVGM/PCeNJVVT70FtxVr8bpyuuUu2ehMZOyU+/ZtPQKTlyrMmcpzt58M0Y+ZkGUwoN7pqVybpNDn+pcDjrYJvj97Voi0nGZAvOPMjm/g0x2uNw/D4W2TZ8abzHhU/8D8dfehMHzTh6WK97Z9Rsr8BNrOZBrQid5jpe+9kpCNPE81y2LbsYI5TJIRerFaAfoBXPvgzPA7tETbjZsWIhJccuwiooo+7RK4IVo0azu5OqYVfPnU1bl0PhrMsw81UAULPyKuUDBUq/PBercFxwn5qHFhGLbsNtb6TmsasQQgT65W9vuYbZ56DNVMbutTcvvzqVtytqOXhqWd
ptL2x4n1e3dfHKNo/F/9uF9CTKqR3G5Zm88YMCrvtrO287GXzp0Ol8rqAeRJylr8b4y7lhXKnMm499tIPGLoeVb8cJW5AdUhpW3yk5ZprFOxGPz441mfuHTrJsmFposOQYpV//3O5wxoE2x+1r0dINJQn9enxjnIZOOHl/C9OAoycbPPHMFuqb24dU79XVHefe5zfxjQWDq/Vqro+mdWW7jsP25ZfhNNfx71tOB1TtceQPtxLKzuOQ+fcEAdq2ZRcH31Og9Gvi+UvoqKmg6dk7tX71gw6+PkFIKRGGQfm5i6l55HLs4onE67cr80InhjBthGFhJwbSlp19K05zLVZ+mTq/aIIa9zBMvn/rw8HP911+Dk07NuO21vPgSWEKwwb/sznG1X/9kP/bVsufX3mP1//TwW9fk0zOF5yyupOls8LcPjOTB96I8+CbMSblqyG0AGcdorqNMiyYMcHk1APUwG0nMeD2pFUd5GUIIm2SaIcqgO1yICekxC/uwtzDbKYWGWzY4fLYu5JZB44hUrl92K97Z7iuG4zeADUSJTmjUQWnDc/fj9NYzYY75gXneZ0tRNbeQfG35hOLbkMkJhcIwyDeUEW8rSEtG/ppL1LVfHKRUiK7Oyg9/SaiaxcHeiRMG+nG0/TLyC7Ea2+k9PSb0vQLwGmo6m/ARxpHHHset9x/BcsXfBPbStY8+d5ePscvWsq2HRFaWtu4+9hM3q2LM3Oaxao1W+jqdtj4QTt3/SPOlALBtxP6VRQWnHKAzWNvxyjPVfrVFgfpSc48WA3ctgwozxUcv6/Fyrfj1Hd4nPhYB4WZgminZEtznAfejNHarfQrNySIe0q/JhcYtHRLisIG3/9C/pDrvX70wN854pyrBl3r5UkvTb8Aok6MstP9ucGqPq/h+fvprNseaJgwTJW9XH8HxbMWBPqlkL30C7SG+ejgazfGMgQ1j10ZFNwDiXla/rovaS0R9BEJEoWsBLPR/HP84yPJ9299mL+uvId9djzFfgeUAHDaFNhOhDN+/nsOmVDMhcWtPPpalCPGKX+cb6/uoCgsyLZVxmv5iWHOe6qTTkfyzCaH9ZscCjIEjgTXU474vz5W1T+ETGjukoRttcoEcDxJ2FLFrNWtHre+1M2NL3QjhWDs2HLMVo+cjtF1sQcJhomVX4bTXKuOxLooPf0mQiWTAusQp7mW6NrFwerfaVTznDNLJgyr40uj+bjJycmlqW5TmoZ5nS3BUO3+SLNh8Y8hVI2rN3j/woxwFlO/cS6/fOIPXHXGF/o9b83i+dz2yJ+g6t/81wHJwdPfjzbD+P35/MF7s/yp5wP9OmV1hzJ9tiEvU/DQSWHOeLKTz483eLnS49kPHP5nq4spIOaqbP60IoOqVoknJY4nyDAFHXFJlqWCtM64xPGUfv3q5Ri/elnNfswIWZQWhoZkLfH7f7wHU79IUem4gU/uByfWjdIwFSLYxROI11cGGmYXTVABsxUiXr9daVh+WaBfAKZla/3aCTr42o25c82/ehVC1nc0IYQfYxmACLrlMExlVrjih8oVPwURyqJmxQ9x2xsI5RYTb2uge4TafTe9/hKvR7pY9VZ6Vi2ntJz/rWjg91sb+NxYg3fqPG6bmcl5T3eSaUFzN5y4n40AvjbF4vmKOJMLDN6v92iNSdpjytn9jINC7F1kcMK+Fo+8FScrJHjwxEwKMtUbsfDZLh77dpjibMEZj3fy16oQP/7NHyidsDt4yaiAt/6ZJThtUXAdYpHNyZulh9fRlFj1D2I5r9F8QvC3INMMO7vU38EaUQiVjff1q13NdDRCYbz2xuCxREYWO1YsREqJ1640cDD6NemgGby2bRPPb/iQoz+zd7/nvbDhfaoj3Tz6diTt+Lja99lWU8/RU9Wcx9tmZnL+H9RC0ZOCE/ezMQR8IzHCbGK+QX0Hafp12oEh5n3GZtmGGI++Facz7pFhCVacGGbvIgOBZM7TXaw8Ocwj78T50wcO9piprL1t6IXpr2+q5smNHXz5/BOHfN9Ues7TqH3sGrzOFtyOJnAd4tGtiROVsbfX0ZS22NcMjA6+dnN61oHNP+4ovJZapOcinRhWfmmw+rAKyjHCeRTPvozalVepoMx1cBqrKfqm6rCrfewqcjItckr2HrGJ8qnbkD1pbWrgoR+fTp6ooqXbY0uTx7HTLSwD3qz1WPR5G4nghH0tfv9enFeqXAwh6HJkML9x4VE2MRfOONDmmQ8cjp5qMaE4TFtHFwKYWiiYemcbRmI4t5QOWbmDGy/yUegZEDfXR3EevRIBhPLGAOC2Nyl/m5Za6tcvwW2tU8FyilO9EAZSekjXpebBy9RBw8Rta6R6+ULibQ1MnNz/F4ZG80mgZx1YKCND6ZfrYOWpWjCnuTapX8deBq5D7erreukXnkPdEzcycfLg9euIY87ioYd/SUZoK186aK8+z+m5FekTbWrjtMuXkJVhMHsf5XB/4n4270QcYq7g8v+yKcw0uODwEE//x+HNWheBwJMSS8CkgqR+nX6AzV83u3TFPY7Zx6Y0x6CpSyIlHFxqcPA9bYQsgetJqKsYcp3XK/+p4s4X6/jy+ddg7GQh11d3tnRdttx5dqBfQTOQENSvuwO3tQ4juzBNw3z9svLLkK6rGim0fg0aHXztpvQ75xEYP3k6TTn5VN5zfmJaPGnp+NrEXC3f7M7vEIL+zVdH2gfM59VnVnH0Xh6vvG/wxGlZ/Cfq4myHh9+Kc0iZwRs1HqXZgnG5gu8fGeKe1xzefOwm5v1sOV21H/KVvUxMQyAdVVR/zDSLh96K89g79YRMQacjsQ3lsXPfCXkI4Ldvuryy/rFdts24s+5Gv7j0tZtPwy6eQCy6Dek5yTqX4olqm1hK4vWqDk0YBsWzlM9R7aprwXNGPEDWaEaT/vSkuT7K1IlTqc/Jp+7RK+hujigriRT9iqy6JqhvDZWp7HVql2NhSWmfn4udadiP73qUe1f8grK8CPtMGvzw7QfXvcxXJ8IL70sePMFGCMHcw2yOWxnnW3sb1LVDXbuHbcCpByrbnGcrBNOnTMJt2NxLv47dR40iWvVOnBe3SVzPo7VbvfZDyi3uOlZ1BT7whjOkOq+XNm7nvn818eW5VwXDrPujL/3K6KFfVVs2sWPl1Vh5ZUinOyiXqF6+ALtYFdYH+pXI2hfPWqD1awjo4Gs3pa8PCMCGm0+lYtklxOqjgKD09J+i7KJFsIqsuvc7SDdO9e96r+b6G3Da3/WG68my6fWXePHDBk6ZDi1xi3H5Fgu+kEHI6mD1e3DBH7uxDFXMCSCl4PybV/Lihg/wPJeXtzvc/GI3tgldjtqqyA4ZjC/M4uHv7MNlqz6gtqWbSKfHcY9LMrNzAMiJD6/Ga2dCDr27G10nTndL1FsWxgAAIABJREFUHa//4kzyi0vwnBjVv5uvalwgrc5Fyp5JfQiVTgHAzC6gICes6yQ0n2gG0q8w0NBSh9Kwm9L0CxIaBr00THa19ptN2ZmGCSH43KkXc93yn3L+F9uYecTgShJe2PA+73zYwgnToCVu0NIMIdvk5P1D/
LHC4H+qJTgx8jJVwDMuz2beUbn88m8VmHi8UeOy5J8xisKChi6JbQjG5VlMLw3z1PnKdPb4Zdv4TyTG9o4Qc9b7X8kW45zB1Xm98NZWlr/eyRfPvSIIvIarX43RCNKJU/PQIpUB89yg474XiWuGSqdo/RoCwwq+hBBFwCpgMrAFOE1K2djHeS7wduLXbVLK44dz3U8zBWPK+dnytVw9dzYNdbVY+X0bhgorlPDOcXHbG4LVpUBZWBjCYPxeU4KVSVNdDW8t7R2sOK3RYT3f79/6MPddfg7P1mzj2fWpt2RjZbSRn5eTdr7reryxOcqcL47nvM8WceZv3uWLEyQXHhni4bccnvwwA+l0s70pzkkraoFcCOVihaCgdNJOt0CHws6EvLk+itXdHYx4kp4DqHFJXiK4FQi8rjZAYIRz1WDZUFjV4/mvNdFyLz2v10QCzeigNWx08fULlA5J18XKL+vzXCtXOdQ7LXVp+rV9y4dcNHsGliG4c82/ABVs+MagqZiZWfjjpMPZuXzj4lt54slfE3c2Mftz0wd8vmsWz+f4RUt5sTbKi2n6ZXHQ3qrBqDqS1MjqbvjNK63KsmLRdA775Yd8bbLggsNtlm1w+GOFQUvc4Z/bujjybr++LJO8/EzGlZb0u/3ZH3/eUMHKd12+cNZlaRmv4eoXgLBsLMvCBTBMhGmnaZivX2Z2kdawj8BwM18/Bv4ipbxFCPHjxO9X9HFep5TysGFeS9MPqYWOfvq+7LQbEYaN9ByiaxdTfu5iNWVeCIRhIT2Hqid/kryfMBg3945ej7116ZxhP7+hBkT3XX4Oz32wjXX/V4PrSOYeHiYcMjjtsDAvtuZz7i2ryMnfdTVdO6OprgZXqk5Gf5A5hokQBsIwGHPKDZiWRbnjEHniBlINiUpPvRGRmIOGlEGdl0x4DAEDbhloRhytYR8zPQu1vXi3+iwd9yOk51G35hdp+gVq/FDtyiuD+7S1tVJ+xk297BKqly9UQxpTmPHti/jj08vYUvMqP5h9BKa580aXoQZExy9aSnUkymF31tASk5xyYAamaXDWYTZvt2Xw+C8XfqQZjal4nsflD/yd9tLD+PwZZw/pvq4T76VfPr5+jXHiRFZfT06mRXdL8r6BhqXoV/l5t7NjxULVCKb1a9AMN/g6Afhq4ucVwAv0LVyaUab+2btBujgtalWW2gIskQjTSlvl7E74wVpPC4sy4Lia6C6t5xoIKQzMcC5WQXnaoGCnsVqNUFl1LVZeCU5LnbpBCGhvIrrmVvWrlUHxty4Gmcwq2sUTgpo809SVAKOM1rDdEOl5RJ6+GWFauO1NafrlBw6yV0/e4DnyxHlUvPMqF9/zMNeePoPxYwoGvtMg8YO1vuwrZtc0f6QZjan8892t/Grtexx8/PfZd9pBQ76/FEpzpOukDzsHIo9fn1J/J2muj4IkXb9mKrd9X79qHvwhRiiMHcrQ+jUEhvtOlUkpdwBIKXcIIfqrZMwUQrwGOMAtUsqnh3ldzQBIp5txFyxVHSio/XjfWyryxA14na14rstFs2cA4LkulQ9fwYRzfvFxPu00+rWwqBltz650DDuTGn9obEK73PYGrIIycF3GnncHO5YvoHjWAkJFE4g11ZI6s6521XXqTp6DVVCO19UWjPFQYzh0+n4U0Rq2m1J+7m0YdojKu+f00i/Z3QmIpH55LtbaO5g497adP2gKUw/6LM1jJ3P543fx+fEG8751GJkZ9og9/53ZV3yU4Kuqrok71rxOY/5+fH3+r4IF25BxXWpW/BDpuUkNEwCS8RetoOZBta1YfMylmJaaW5mqX9F1twXzOe0CtWXsa5jWr8EzYPAlhHge6Kuw6OohXGeSlLJaCDEV+KsQ4m0p5Yf9XO97wPcAzll0E18+/swhXGbPoeecx9TjPlJKqu79Ti9/KOnG8Tpb2LFiYbAv77unm3ljkN2dKo3fWE1WuSo87aipILp2Md3RbbiOk/JYLlfPnR1cezS7V0aqfmukGXPsAlxPBmapADUPXUbxMQvS5jkCWKEMvPxSDDsDL96NmVMEoAK0Bxdh4hHvaiWnxP/Oj2kH6BFmNDVM65diMPoFaqZp5d3ppQ3KhsKl5pEfqc5gz+ulX2Pn3kGstoKssar4vjOyjbo1v+ilX90tdTitgqvnzu5Tv/KLx/DV7/2Ere+/w3lL7uWCr01j5mcHrgUbDEPdruyPjq4Yv/3Tm/xvFXzm5CvYp3jMsB7PsEOUn/NL4k01gYYJw+izQQvALihTMzrtEEY4Ty3qVyxE2Jl4XW1Ylp2iYVq/BsuAwZeUst+BeEKIWiHE2MSKcSwQ6es8KWV14u8KIcQLwOFAn8GXlPJ+4H6A3/y94qPnlT/hDBTk5OTkKn8pN07pqTem3CKJPH4jpadcj1U0Hqe5lvq1twWjiPrDL74cP3l6MLMLIJQ/hqnz7mLjskU0bvkwCMRSn8cnqZ14sJYa/X15GCnjM2Tqtq30/5J4ceVOjYR4THUI+Z1C0nVwO5rYsWIhTmuU/NKxuiV7FzOaGqb1SzGY/8+WIfAsq4d+QeTxGyg77aeJmbVq3uNA+uXTU78y8sZwyPx7BtSvvfY5iIkL72TN357mwdv+xGlHTeL4z+/7sdYwbatp4ImXNvHy1g4OOOZ8vjbrwGHrV05OLo3RSHJGSmLxGHRgS6lsPhLHXMdBJG7zNSwW2Yzb1oARzkMmgi6tYUNnuNuOa4DzgFsSf/+h5wlCiEKgQ0rZLYQoAf4LuHWY1/3Uk+Ye/cKv024TAuqfUzVf0nXwulqp+vV56oPmOWBYqngV6IpWklkyAWEYuB1NVCy7hMZoBDuRofGn0fsT7cdPTl8VDteKYrQZyFJjIHG7eu5sTNPEdbqRTjy4PegY8lycxkq89iZ1vJethEAIg4nnL2Hr0jm6JfvjR2vYx0QwwaMP/UIIVX8U78LrTNEvACSxyGYkckT1yzAMDvvGycivn8Rf/vEMq5f+nWkFkgu+eRB7lRftsvchFcdxefIf/+FPb1Yix0xn8oyzmXnCvsHtg7EEGkjD5h93FG5LLXgu0o2nnSPdOHgubmcros/x5QIMEzOniNLjFtH9/J1awz4iww2+bgFWCyG+A2wDTgUQQhwJXCilnAfsD9wnhPAAA1Uv8e4wr6tJ0Ndq4+q5s2lq62Ts+XcinXgweNufIwjQ8Jf7kbFOte/vd7tI1c3nOQ6xtkZCOYW4XR28tfQHxFobVJfMHs5A4paTk0vbc7er1WOmspDAczEyc7CLJyAMC6twPCDBtJLdjQES6XlUL1/Yr+eaZlTRGvYx0p9+mZaFjHVSft7twSLHaa7FLppA1T1zafjL/XjdHbAL9EsIwcFfmgVfmkW0eis3/G0dbt0bTC8JcfoX92FiWQHhjJ6f649OfXM7f3zlA/79QYRGL8zYI77FURdf/pEfbyANG7/XFNqevzOpYaCscMJ5yqLIsAAZ7Jyka5hUwVlbIzWPXaMd7IfBsIIvKWU98I0+jr8GzEv8/DJw8HCuo+mfvlY5jdGIamJxVe2DdFIKIBNmn16sk7EJ
YfNbvWORzTQ9eyc4ccYcfwXh0klUPXIlXqwDf1+tassmAEzTpHzi7jA7cdezcdkiOnZUBsW9gPK0cZuU8AuB294UrM5rHlqEdGJEVl1Lzylp6t9CJBygk6vwXTVhQLNztIZ9vPSnX+b6JUjPDTTMx8/UeN0djJ1zO9LdtfpVMm4vSk69CCklTdFalmz4G83rNhByOpheGiY7w+LkL0zDNAyyMkPk54T7fJzuWJyGlg4AIk1trN+wncr6dmRmPm0yg72O/G8O+OIhWPbIBXU+G5ctojOyFQl9aFgjwrTVDkl7Y9oOSc1Di/A6W0nVMOnEqF19LaYQTJg8vc/5nalo/eof3Rf6Cae/UTc7Vl4TdMPEY91BR7EwLYSdidNYTby+UonbTuoavFiHKg5fsRCrcDyhjEwAuqPbds0L2g1xuzowc4uZNO/utON+k4IZziPeUBkU00vXQVghzLwxlJ/587SupOrlC8nJtHql6nfVhAGNZndmZ6O6DMvGDmWk6ZdhZyhDz/ZGNZB7lPRLCEHhmHIKZ6oGCs916ersoLm+lp+99A/1WqIV5FtOn/dv6oiTM266Gihu5zL9G5dxYGYWdsauH0at9KuE8rNvTdOi7ug2qh+9ksnzV7B5ydmBfgG4bapRyyocm9YBr/Vr5NDB1x5GzfYKXCeO9Fw6aioAqH92KdLpBgReZ3PwIWv4068pOvr7QcdjV7SSWGsDamssTkdNhVoRxbt7+cHsyWxctgi3qyP4PdbagBHOpfLhK5DxruC4dB28zha8rrZg9hmoFaXTXBN442g0msHh61d3widPaZik/tm7kU4MYZpBd3f92sVgWIG9xGjql2GaZOXkkpWTS/le00b0sYdLz2klvn7Vrr6ul34hoeqRKxGGwdjzkibbseg2rPxyah/50ag+908TOvjaw3Bdl6zyvdW4R8Mi+swS3NY6NT8N1LZjok6pfn26o730PMwc5RxvFynjT7e9idpHLsdpraf6/u8SWLZ7Lt1lYz9xbcV+F1BTXQ0ypXPREIaqlaurwcotSXP73/7AAtUFFOtk7NwlwXEv3o3TXEskMcg82f3oC70g3lCFZyU/ZvG2BnJKdJ2ERtMXvn4JwwIpqX/u7kTRfUu6hgH2mL3Y8cClwX21fik7jZ7TSrY/sABQpSbjUvRLOjHiDZVEE/Y4skcNqhDK9iM1S6j1a+TQwdeeiutSu/papOtgZhekzE9TjnpOQ5WqqZCSzoj6cEnPUelmYSClJB5TYz7K59yubk/pjKm69zvK/Rg+Nh+wgRhouGxfafLXf3Em3Y01bFt2cXAsGI6dEG7pxJILac9RNRKuQzyyJWULROJ2NFL/5E/ILy4JHmviZN2SrdEMiJSAxGmqoey0nxBdd1uahjkN1UEh/lD1SwhB5T0X0BiN0FwfTbOf2J007KPoV8WyS/DisUHqlxIxt6NJ2eKk6peUauHY0Uz383cGj6X1a+TQwdcegj+l3nUcJTqWjZmVDwg1TsgKKcNCKZVoCXDbm6hff3uaHYJVOI54/XZqHlqkBnMnXKUBVVjeHAlWSDIjm7YuVeNgZmZBH0Ix2qQKVmM0QvkZarWcWmDr1yH471nduiV4iXS866q5Z2OOvwJhGGSWTKCj5kNAULvqWmIpq0A1z9FGGAZ28QTiDVWYKVkuy7RZ+sd/jsbL1mg+8agtRyfhjScxswsBgVU0HmFYqt4rRb+cxiq8zhbq1iRrkgbSL2FaOA1qVJGRXYh0nd1Kw4atXx1NQ9YvK7tQfQcIkaZfpmlSVFqurSR2ETr4+oTjp6F9b5tYawNmTqHKxgiR3AFLmOclfgnuX3qCamn2HAcJ2EXj2fHgZZTMXqSG1666RnmC+TO7PBercBxmdiFj5ywOCjj7GmD7cZBa+PnW0h8EZot9Fdi6rktGySSk5zDugqUAxKPbqVvzC+ySicSjvqmjwCochzCMlNU3iUG/arsj3lQDyLTBvkOZnTlYR3CNZk8i9f+936Wdpl8AQi0gU32+/L+McB5lJ16xc/3y50L6lhSei5ldwNjz7kA6sd1Kwz4O/XI7moNjEhWw9XfN/tD6NXQ+/m9LzbDwU8BXz53N1Hl38dbSHzBu7h3BPj8kJs0LEYzqAJHI2JjBh7ujpgJhWKob0jDVh9WyMcK5IASh4gkgjMTKSSBMk3hTLfGEEMZa66lv8bho9gxkPEbR2Am9nuuuSOn3TM03RiNUbdmEaZojdg1/1Q0Qysgk1t2lfjctGv58L3gOkVXXJMZ0JK4rBMYQgi+dytd8Gkn9f3/13Nm0dTm99cu0EQiEYaTpl7rNIqNkUlK/rFBv/Up8JkOJYdJOUy0A8fpKpPTwiyl8Dbtw5qFYoXBauQDsOfoVdI+aVtqINH+otkgJUiftvc+grqH1a+jo4GsPw8zMonr5Qty2xkB4jFAWNSt+qLI0UpnkeZ2tSM8Jxt/4HUKxuq04bfVB2t5tb1JZM0jxO/azaBK7SAVZRnYhJbMWklU+lc13nj1qbcc9W5z91eJwrTCkE0N6DtuXX6Y6RQ0T6XlsuetcdXtiHhpA+Tm/wi6eQCyyOegc9VuyNRrN4OlLv5ASkRFmx4qFafoFUm0zJrYppRcnFtmM05quX0IYSOnRs99RSi/QL0hqWOTxGyg9/SejMs3j49MvZU5r5hQhDJMJF6/Ai8dwm2sJl6oF+dalc3RQtQvR3w57GAfOUw72G5ctorOuEjO3JCFUSbzOVsJjJtDd1px0LzYSqyDpIYTBmOOvoP7P94DnULv6uiDQkJ6HmV2AEc4DCI4LBFJ6qjVcwoY75qnjQhDKzgueV09G2pwv1lLH9gcWBDYQ2393afD6mL0wWFHm5OSy/bFrsHOKcNsaiUe3I5HUP3MnXmcLNQ//COm5QZeVL+BWfjkgqX3kCmRCzq3CcX2MEdJoNEOll37ljWHH8kvTzknVr/KzblEHE/oFSpPS9Ouxq9IWS1J6WHmJ7Td/R0BK8Dy10PRcap/+BbUMrF8wsho2kH4BNNdHGb/XlF76BRB9ZslO9csuKEdKSc1Di8AwKTvrF9Q8cnmwmJa9QlTNrkIHX3soB85bzMZli+iKVlJYUhocb4xGCI+ZwIHzFvPG3fPZsUJ9oJ2WaFKcnBh1a27F62ym7Iyfp6S8BNG1iyk+9ocI01YrUH9gtOcgEIRKp2BmF1ByfHI8RmTVNWy4Yx5ua32v59lz5ecXkW579Coumj1DuTCbJkKCME0MYZBfXNK/sAmTsefdgRfvxm2pC5oDaldeRf3a24i3NQQdOxcfe2TiTknRkfEuSk//Gf4YjejaxVj5ZapuxLQw7BBevFudn2h57zkfTaPRDA9fv9yuDuJdrYGGfRT9EqaN9PzPqKB29bUUz7xYzTZ0Yqn+7VhF43eqXzfPP7OX7vSlYbHubrauvpYLZx4GiQWfkGBa9qD0yx8H5y/qfP0CpbUfRb+EaavtRpG023Caa4P3YWdmtZqRRwdfewh9FTyGgTE9WoNVbdhiarZXUDTz4sQcL6hZeWXaCilUMkkJmxBklU+lO7qN8ZOn05qTT+T
    [base64-encoded "image/png" notebook output blocks omitted: matplotlib-rendered PNG figure data embedded in the regenerated documentation notebooks; not human-readable and reproduced by rebuilding the docs]
b2yrUhtjzMFYb5SZKPa/kilZf/dnL+f5//1OsZthjDHGDGHFlylZf3xsHXu2PVvsZhhjjDFDWPFlStaiBXPxe1oIg6DYTTHGGGMSVnyZknZc3Ry692SK3QxjjDEmYcWXKWmn/9lxbPvxk8VuhjHGGJOw4suUtMW18+lp/F2xm2GMMcYkrPgyJa2iPE19eZb9Pd3FbooxxhgDWPFlZoA3HFNL2/bfFLsZxhhjDDDO4ktEPigiz4lIJCJvOshxO0TkVyLyCxF5ZjznNOZwnfrWV9D0qx8WuxlmGrIMM8YUw3hXuP818AHgS2M49h2qatPOzJSbM6uMaM+uYjfDTE+WYcaYKTeuni9VfV5VbTSzmdZEhNcvq6S96aViN8VMM5ZhxphimKoxXwp8R0R+KiIXTdE5jUn82auX0vxru/VojphlmDFmwhyy+BKR74rIr0f5dephnOdtqvoG4CTgoyLy9oOc7yIReUZEntnwzR8cximMObA3v2opHb+3oToz0VRmWGF+Pf2obcJsjBndIcd8qeo7x3sSVW2Of28XkW8AJwBPH+DYDcAGAH54p4733Mbk1c4WctkB0mXlxW6KmUJTmWGF+XXP09ssv4wxo5r0244iMkdE5ua/Bt6FG+RqzJQ68dUL2fWCLTlhDo9lmDFmoo13qYn3i0gj8FbgcRF5Mn58sYg8ER9WB/yPiPwS+DHwuKp+ezznNeZIvOVVy2j95feK3QwzjViGGWOKYVxLTajqN4BvjPJ4M3By/PU24I/Hcx5jJkJD7XzCjh8XuxlmGrEMM8YUg61wb2aUlbUVdGXait0MY4wxM5gVX2ZGOelPlrLrd78sdjOMMcbMYFZ8mRnlDa9YSsevR51oa4wxxkwJK77MjFJelqYy6i52M4wxxsxgVnyZGedNL1/Erud/XuxmGGOMmaGs+DIzzqlveTkvPfOdYjfDGGPMDGXFl5lxFi2Yi/ZmULUFyI0xxkw9K77MjLRygc9A3/5iN8MYY8wMZMWXmZH+6rXL+P2Pnyp2M4wxxsxAVnyZGemtr1rCnt/9pNjNMMYYMwNZ8WVmpPKyNHOkn9zAQLGbYowxZoax4svMWK9dOo+uDttqyBhjzNSy4svMWB9463H87r9G7KlsjDHGTCorvsyM1VA7D29vY7GbYYwxZoYZV/ElIv8iIr8VkWdF5BsiUnWA494jIr8TkRdF5BPjOacxE0VEaJjr070nU+ymmCKxDDPGFMN4e76eAl6jqq8Dfg9cNfwAEfGBu4CTgFcDZ4rIq8d5XmMmxBuPraFt+2+L3QxTPJZhxpgpN67iS1W/o6pB/O2PgKWjHHYC8KKqblPVLPAgcOp4zmvMRHn/2/6Ibf/7eLGbYYrEMswYUwwTOebrfOBbozy+BNhV8H1j/JgxRed5HvWzbZshA1iGGWOmyCGLLxH5roj8epRfpxYccw0QAJtHe4tRHjvg/3YicpGIPCMiz2z45g/G8hmMGZdX1VXQ+IffFbsZZpJMZYYV5tfTj26ZmA9gjCk5qUMdoKrvPNjzInIesAo4UUffqbgRWFbw/VKg+SDn2wBsAOCHd1qXhJl0f/u2V/KJxx9j6cteWeymmEkwlRlWmF/3PL3N8ssYM6rxznZ8D3AlcIqqHmiX4p8Ax4nIMSJSBpwBPDqe8xozkepr5jHQ2VLsZpgisAwzxhTDeMd8rQfmAk+JyC9E5IsAIrJYRJ4AiAezrgGeBJ4HHlLV58Z5XmMmVN0cIQhyxW6GmXqWYcaYKXfI244Ho6ovP8DjzcDJBd8/ATwxnnMZM5lOel0dj//4+7z2//vrYjfFTCHLMGNMMdgK98YA73jDcex9/n+K3QxjjDEzwLh6viZd3fHFboGZIcqA1x/fxx81zC12U0yJsH9LxswsdfMrxnysjD655+gnIhfFM49KVql/xlL/fGCf0YxuJvyZ2WcsDaX+GSfr85XybceLit2AKVDqn7HUPx/YZzSjmwl/ZvYZS0Opf8ZJ+XylXHwZY4wxxkw7VnwZY4wxxkyhUi6+SvYedIFS/4yl/vnAPqMZ3Uz4M7PPWBpK/TNOyucr2QH3xhhjjDHTUSn3fBljjDHGTDslXXyJyL+IyG9F5FkR+YaIVBW7TRNJRD4oIs+JSCQibyp2eyaSiLxHRH4nIi+KyCeK3Z6JJiJfFpF2Efl1sdsyGURkmYh8T0Sej/+NXlLsNh1tSj2/oHQzzPLr6DfZGVbSxRfwFPAaVX0d8HvgqiK3Z6L9GvgA8HSxGzKRRMQH7gJOAl4NnCkiry5uqybcfcB7it2ISRQAa1X1VcBbgI+W4N/hZCv1/IISzDDLr5IxqRlW0sWXqn4n3hQX4EfA0mK2Z6Kp6vOq+rtit2MSnAC8qKrbVDULPAicWuQ2TShVfRroLHY7Jouqtqjqz+Kve3AbUi8pbquOLqWeX1CyGWb5VQImO8NKuvga5nzgW8VuhBmTJcCugu8bsf+4j1oishL4E+D/ituSo5rl19HD8qvETEaGTe+9HcdARL4L1I/y1DWq+s34mGtwXYibp7JtE2Esn68EySiP2bTco5CIVAJfAy5V1e5it2e6KfX8ghmZYZZfJWSyMuyoL75U9Z0He15EzgNWASfqUbiuxqE+X4lqBJYVfL8UaC5SW8wREpE0LrQ2q+rXi92e6ajU8wtmZIZZfpWIycywkr7tKCLvAa4ETlHV/cVujxmznwDHicgxIlIGnAE8WuQ2mcMgIgL8K/C8qq4rdnuORpZfRy3LrxIw2RlW0sUXsB6YCzwlIr8QkS8Wu0ETSUTeLyKNwFuBx0XkyWK3aSLEg4zXAE/iBjk+pKrPFbdVE0tEtgD/C7xSRBpF5O+L3aYJ9jbgHOCv4p+9X4jIycVu1FGmpPMLSjPDLL9KxqRmmK1wb4wxxhgzhUq958sYY4wxZlqx4ssYY4wxZgpZ8WWMMcYYM4Ws+DLGGGOMmUJWfBljjDHGTCErvowxxhhjppAVX8YYY4wxU8iKLzNuInK1iNxb7HYYY4wxRwMrvgwiskNE2kRkTsFjF4jI98fyelW9SVUvmIR2fV9E+kWkV0T2isjTIvLaiT6PMcYcioj8mYj8MM6iThH5gYj8uYjsE5G5oxz/cxFZIyIrRURF5GfDnq8VkayI7JiyD2GmDSu+TF4KuKTYjRjFGlWtBGqA7wP3F7c5xpiZRkTmAY8BdwLVwBLgU8Be3Ebafzvs+NcArwa2FDw8J34870PA9klstpnGrPgyef8C/KOIVI32pIjcISK7RKRbRH4qIn9e8NwnReSB+Otvi8iaYa/9pYh8IP76j0TkqfjK8Xci8ndjaVy8X9qDuEDLv+8JIvK/ItIlIi0isj7eyBYRuUtEbhvWjn8XkUvjrxeLyNdEZLeIbBeRjw9732fiz9omIrYxtDEz2ysAVHWLqoaq2qeq31HVZ4GNwLnDjj8XeFxVOwoeux84b9gxmyaz0Wb6suLL5D2D61n6xwM8/xPg9birvq8CD4tIxSjHfRU4M/+NiLwaWIHbNHcO8FR8zKL4uC+IyPGHalxcVJ0F/Kjg4RC4DKjFbcx7IvC
R+LmNwJki4sWvr42f3xI/9u/AL3FXsCcCl4rIu+PX3gHcoarzgJcBDx2qfcaYkvZ7IBSRjSJykogsKHjufuDPRWQ5QJwvH2JkYfUAcIaI+CLyKtym6f83BW0305AVX6bQ9cDHRGTh8CdU9QFV7VDVQFVvA8qBV47yHt8AXi8iK+LvzwK+rqoDwCpgh6p+JX6fnwFfA047SJs+LyJdQC+wBtfVn2/TT1X1R/F77QC+BPxF/NyPcbcETowPPwP4vqq2AX8KLFTVT6tqVlW3AffExwDkgJeLSK2q9qpqYcFnjJlhVLUb+DNAcVmxW0QeFZE6Vd0F/Bdwdnz4iUAF8Piwt2kEfge8E9cDZr1eM5gVXyahqr/GjWv4xPDnRGStiDwfDzbtAubjepyGv0cPLnTyhcwZwOb46xXAm+PbhF3x+5wF1B+kWR9X1SpcmK0CHhGR18VteoWIPCYirSLSDdw0rE0bGQzEsxkcL7YCWDysHVcDdfHzf4+7zfBbEfmJiKw6SPuMMTOAqj6vqqtVdSnwGmAxcHv8dOGtx3OAr6pqbpS32QSsxvX6PzC5LTbTmRVfZrgbgAtxt+MAiMd3XQn8HbAgLob2AnKA99iCu+X3VmAW8L348V3Af6lqVcGvSlW9+FCNUtVIVf8beBF4V/zw3cBvgePiW4RXD2vTA8CpIvLHwKuAfytox/Zh7ZirqifH53pBVc/E3Rq9BVfwzcEYYwBV/S1wH64IA/g6sERE3gF8gAP3an0NeC+wTVVfmux2munLii8zhKq+CGwFPl7w8FwgAHYDKRG5Hph3kLd5Ate79Glgq6pG8eOPAa8QkXNEJB3/+tN4/MMhxcXcq4HnCtrVDfSKyB8BQ4o4VW3EjVW7H/iaqvbFT/0Y6BaRK0VkVjwG4zUi8qfxec4WkYVxu7vi14RjaaMxpvTEE4XWisjS+PtluN6rHwGo6j7gEeArwEuq+sxo7xMf91fAhC/NY44uVnyZ0XwaKOzpeRL4Fm7Q6UtAP673aFTx+K6v48Y2fLXg8R5cr9UZQDPQiutZKj9IW9bH63z14oqoa1X1W/Fz/4gb2NqDG4exdZTXbwReS8ESFaoaAu/DTSDYDmSAe3G3UgHeAzwXn/MO4AxV7T9IG40xpa0HeDPwfyKyD1d0/RpYW3DMRtxF50HHcqnqM6r6h8lqqDk6iKoWuw3GTBoReTvu9uPKgh44Y4wxpmis58uULBFJ4xaOvdcKL2OMMdOFFV+mJMXjyLqABgZnJBljjDFFZ7cdjTHGGGOmkPV8GWOMMcZMISu+jDHGGGOmUKrYDTiYLT/eafdEjZlBzjxh+YEW7j3qWH4ZM7O8oq6SN66oHlOGTeviq7c/KHYTjDHmiFh+GTOzDOTGPql+3LcdRWSZiHwv3vfvORG5ZJRjREQ+LyIvisizIvKG8Z7XGGMmgmWYMWaqTUTPVwCsVdWfichc4Kci8pSq/qbgmJOA4+Jfb8btyffmCTi3McaMl2WYMWZKjbvnS1VbVPVn8dc9wPMUbMocOxXYpM6PgCoRaRjvuY0xZrwsw4wxU21Cx3yJyErgT4D/G/bUEobuBdgYP9YyyntcBFwEcPbaG3n7KWcOfR5lfjqiwgeR6Tc2V1XpD2FvzkOZfu0zxhzYeDPM8ssYMxYTVnyJSCXwNeBSVe0e/vQoLxl1JpCqbgA2ANzz9LYRx8xPR1TNqSCSFEzD8EKVCg1gXz9dOb/YrTHGjNFEZJjllzFmLCZkna94D72vAZtV9eujHNIILCv4finQfCTnqvCZvsEFIEIkKSost4w5akxVhll+GWNgYmY7CvCvwPOquu4Ahz0KnBvPGHoLsFdVR9xyHOP5pm9w5YlMy1sKxpiRpjLDLL+MMTAxtx3fBpwD/EpEfhE/djWwHEBVvwg8AZwMvAjsBz48Aectmmf+5z+5+5bricKQ93zgQ5x+wceK3SRjzJGzDLMMM2ZKjbv4UtX/YfTxEIXHKPDR8Z5rOgjDkLs+czU3bdhKbX0DHz/jJN7yjnex4mWvLHbTjDFHwDLMMsyYqWZ7Ox6m3/3q5zQsX0nDshWk02X8xUmn8r/fe7LYzTLGmDGxDDOm+Kb19kLjdcm572dv9/BJSzB/3jzu2PSNI3rPjvZWFtYPLgFUW9fA7579+RG30RhjRjMZ+QWWYcZMByVdfO3t7ua4i9aPePyFDWuO+D3d3YehbHCqMWaiTUZ+gWWYMdOB3XY8TLV1DexubUq+z7S1UL2orogtMsaYsbMMM6b4rPg6TK98zetpfmk7rY07yeWy/Ne3vslb/vLdxW6WMcaMiWWYMcVX0rcdJ4OfSvGRq2/imn84kygMedf7z2Dly22WkDHm6GAZZkzxWfF1BE54+4mc8PYTi90MY4w5IpZhxhRXSRdf8+fNG3Vw6vx584rQGmOMGTvLL2NKV0kXX+OZjm2MMcVk+WVM6bIB98YYY4wxU8iKL2OMMcaYKWTFlzHGGGPMFLLiyxhjjDFmClnxdQTWXXcZp//Fa/h/7//LYjfFGGMOi+WXMcU3IcWXiHxZRNpF5NcHeP4vRWSviPwi/nX9RJy3WP761L/jxru/WuxmGGMmgOWXMWaqTVTP133Aew5xzH+r6uvjX5+eoPMWxWvf9Fbmzl9Q7GYYYybGfVh+GWOm0IQUX6r6NNA5Ee81Gfbu6eAzHz+b7q5p20RjTJFYfhljptpUjvl6q4j8UkS+JSLHT+F5+c9/20zU/Ev+4xsPTOVpjTGlw/LLGDNhpqr4+hmwQlX/GLgT+LcDHSgiF4nIMyLyzNOPbhn3iffu6eDnTz3C7R9Yys+fesSuHo0xh8vyyxgzoaak+FLVblXtjb9+AkiLSO0Bjt2gqm9S1Te9/ZQzx33u//y3zbzv5XBc3Sze93Ls6tEYc1gsv4wxE21Kii8RqRcRib8+IT5vx2SfN3/V+KE3zgfgQ2+cPyFXjzdfcTGXnb2Kxh1/4OwT38C3v24zh4wpVZZfxpiJNiEba4vIFuAvgVoRaQRuANIAqvpF4DTgYhEJgD7gDFXViTj3weSvGmsq04D7PX/1+P4Pf/yI3/eqW++eqCYaY4rM8ssYM9UmpPhS1YP2r6vqemD9RJzrcPzqx//Nf7f0s+XZxiGPV+3+73GFlzGmdFh+GWOm2oQUX9PV9Xc/XOwmGGPMEbH8MqZ02fZCxhhjjDFT6KgrvlQVJn+4xfioMgVDQowxRxnLL2MMHIXFV38IngbTN8BU8TSgPyx2Q4wx043llzEGjsIxX3tzHuzrp8KHePb3tKKq9IdxO40xpoDllzEGjsLiSxG6cj7kit0SY4w5PJZfxhg4Cm87GmOMMcYczaz4MsYYY4yZQlZ8GWOMMcZMISu+jDHGGGOm0FE34N6UrpvXnElvbw8AezsyRBoBIBpRtbAegMrKuVy1fsuY3qPQoV5njDHjlc+fwvyCwQwbSw5Zhs0MVnyZorh5zZk0vbR9SEBFUUh6fj31p1xOaqAf8dyGwm1br6W3PwCga/cLB33f3t4ejr3gzhGPb7v3YxPYemPMTHagIivKZVl8zq3U5HKIn0
4ez2fYofILLMNmCiu+zGE5kquywtd07W5FxSOKQvzZVUT7uhDPA/HwZ1cRdLfT/LWbkFQ5DefeBoBfuYDFq28H4KX15x60LXsy7Tx371qOv+C2Cfm8xpjScrgZdqD8Ss1ZgJbPcRnmp/DnVBF1Z2j/99vQMMCbNY/6M28CBjPsUPmVP4cpfVZ8mcNysKuyg4XJG65ymwQ/u/5iFq++nb72nfjzF9H21StpOO92spmdpObXE+xtJV1VT8umy4lyWQA0DOhr3wlAlB3gmtWrAFdopSurAfArZnP8BbfRtOMFOh5bx3P3riXs35+0IdfbyTWrV1nXvTEz3OFm2J5MOxW1Szn+gtuG5Feqqg6A1s1XUPPeyymrXU62fTtli44hyg3QuvmKERmmYZicozC/YDDDfvrZ00fkF7gMu3nNmZZfJWJCii8R+TKwCmhX1deM8rwAdwAnA/uB1ar6s4k4tym+5+5dS3+mnT2ZdurPuDF53Pd96pcdy08/e/phvd/gvnKDW7AoiogHforyd36cKAhYCKSrlwDQsulydr74PKgSBDmC3r00nLsOAAHC7jaWrDyOn938waR4K2RF2cxl+TWz5QudXG/nkAzL51f+gm6sNMgWfjf4pQgK7NrxB5acdTOpgYEkv2Aww6IwZGBYfqXKyhnI7KRxy9WWXyVionq+7gPWA5sO8PxJwHHxrzcDd8e/m6NA/kqta3croSpdt18AQNTX44LG83ARAS1brkGjCA0DxE+xK36PZ24+HTQEb+z/5Lx0OaqK+CnKapfTcv9ad94oLtBEkt/F80ktWEywpxnx3TkkVQYMDUMVj9nvvowwHLp53a4Hr7WrypnrPiy/StqBMizs6QDPH3Js8+arRuQXwDM3nTam/MpfO4oIkipzv/spVF0uqueTHegHLbjQFAHPp2zhCvetn7L8KnETUnyp6tMisvIgh5wKbFL3L+1HIlIlIg2q2jIR5zeT5+Y1Z7Jrxx9IV1YT4eHPnuuCoWw2tadcQcfj66h57+UApObXIakych27yDx2Gw3n3U6uYxep+XV46XJa7ruEcN8ectkBNL4iVFU0yg9YVQgDgs4mwn1dtNx3KQDBvk5a7ruUoCeDeJ57rZ+CKET8NBrmXHghoErYuweAXMZFp0YBZeXlyWcKw5Dy2uVDPme6snrUW6aj/XnYTKTSYvlV2g6WYQANqz9PrrMRcBkW7G1L8kuAbJxhbZuvGJJfSeEU/x7lsq7nvbMRRAh7O2ndeBngMizz6K3xhaG6Ii4KgbgwC91+U/l+srB3z5D8ilIpfN8ViZZfpWGqxnwtgSEXEY3xYxZe01zTS9tZeNonES+FRgGpBYsRP03rpstGPb7twWuI+rqJ+rpp3XgZGgXg+Xjlswc74P0UIh7BnmaIQhc8YeC+h6QnKx9FAtSsupyOJ+4g7NmdnEs1Ird7OyAE3RmaN7irWVfMaVyLufcIw5CmHS8QRSFhcHgb6xUG1mi3JcBmIpU4y6+jVL7wGpFhqTStmy4fcXz7wzcQ9fcm+QWu+PEqKkfPL0CjMM6vJve8SNJD5oZLCALUrlpLx7fXD2aYRgSdu8jfNQh7Omj6wmr3qigYkl/gMszyq3RMVfElozymozyGiFwEXARw9tobefspZ05mu8whRBqRrl6CpMrIZna6W31hLgmcwnAA0IH91J9zG8HeNtI1y9Agi/hpWjZd5gqtIBsHDCAemuunecOFbrbjnCoAwt5O/LkLqf/QzYAb0FpWuxwNBvDnVLuvVcl1uP8P09VL8efWUH/WrQAEnU20PXQ9GrrCD89H8aioXYqIDwhBdgBlMNyCIEdH224uPumNiO/jicf8mloqK+cOGaD77PqLk6vOgczOSfyTN9OI5ddRqre3h3Rl9cgMC3IuH4aJsn1D8gvcbb/WzVeMml9ohEYh7Y98Cr9yAeDyy5s1j1T1UhaddoPr9d94qcuhYAC/spp0zbLB/KpZBqpJho2WX1EE4nlJfgFDMiyfX/9w8hsRJcmwJSuOsfyapqaq+GoElhV8vxRoHu1AVd0AbAC45+ltowacObQjXRKicfsLqAxufBCFIbnOpmTNGi9VRpQbcKHgp5Ah/y9JcjtxOMH9b5WqqmfZh+9IHt9570epXbWW2fXuCqxp81WEdKLBAC3xlWnU1002Diopn+2CTCHc1+naNGcBXvnswVuQfgrxU2Qeuw3xXDd/uK+LsrnV8e1JyO1tG1I4Rn3dEAWQKsebs4Cgq42OthY62loApfNmN2lg6GBaM0NYfk2x8Sxpk18OAtzagQC5rlY3YQfw/DRRkE16wvKZMBaj5Vfd31wJQHnt8iS/wGVKa36cal9PfJsRpGw2rRsvS/LLn1ONoi7D4nFew/PL3cbcM6SthRkW9XW7IRlRFF9sCkG2j5f+8HsAOj5zGuL5qIY0bb6KJWfdPObPbCbHVBVfjwJrRORB3EDVvTZeYnIdyUJ9vb09RKrJFRyA9nbGV2xZN85quILckhHF2KDBsQyd7PrKJQB48ZiLQlF2P3Vn3DR4vigk89htEHe1153xmeTYlq98DPFSEHM+rAAAIABJREFU1J11iztHONgd71VUUrtqLakFi2l/+AbY1+WOyfXTtvW6pO35QFYFf24ttae4XrbmL69h8fnr3a3NjkbK4qvFXXeefaA/PlO6LL+m2JHm17EX3MkzN52WZJinSrjPjQHNZ0pSEY8SVQfKr7zC/AKS984bLb8AlzlxoZTPsNaNl6Eo9ed9Lrmoy/8+PL801++eL8wvXG8YuLeuO+PGIWNwOx5bR/15bsZkPsOy7dvp+NZg8WiKZ6KWmtgC/CVQKyKNwA1AGkBVvwg8gZum/SJuqvaHJ+K8ZhKIj5TNRrP7Rz4XBWTbtgFufELblqvRMKBty9W48iruDu9sQlFy7duTl4Y9HYCSrl2RLDzYsvHSUZuQmr+IXIcbACv52UXi+s5ynY2D04mgoKdNEb9sSAEG0Pbg1QRdbYjnke3pAM8thijpcmre9VFS8VRvd4U6NHhVo6RqzK/XA9CfaaSidumIdttg1qOT5Vdp0SEzAQt/phWigNzuHW5gfHcGDQMa15/D4OWhjDm/ABrvOpfh0jVLk5zE8wcLOhGivp7kdmN+PGw+z5IV8Qt6t4K9bWTbd+Cly+L3SyXDMyRVRt2ZnwWU1vvXJheSQ/8wBr/MTwjQMKA/0ziizLT8mloTNdvxoAMb4llCH52Ic5kjl1+Pa/g6McN/uDS7n4bz3IryrVuupuOxdW7GTZ/7wfRmzSVds5Tqd30k6RFqvu8SiEKivm7SC1cOBoi6gskVPBVEfd003nUeblCpm+XYtvVa/DkL8NIVbiq2utmMZTVLQTy8WfPc1V8UQZBLCqYhwYa4bveC3E0tWAJhSN3p/0y6dgUiQrZjF+maZW4wrQyGIhAP8o+LLiDobHZfRyHB3sFVp1vu/0fKF9ST6+2kXTwijRCNbDDrUcry6+gw1vzC85P8UlXaHryajsfXEXRnEM/Hn1OFRiGpqjoaznE7YTTfdwkN536OxrvOHZFf+d6lVFUDU
V83zV9eE2ehy7DmzZ8gVVmNxr1s+d6rdM1Sl05xUdT+yKfceefXJe0U1/2OO2xo8ZSfGCB+iqUf3YRGEbnOxmQsWstX4lyJX5+qXjKk2Ar370lmcRZmWLivi9YtVyOC5VcR2Qr3M0jYv///Z++8A6Qqr/f/eW+Zsr2xhaUXFRF7NJrYY8eOIjYsaIJig19MsLfY8gVBUaNBRZAmGCMComKJRqNiBcGCFLfAltleptzy/v64M3dndpeyuKjIPP/A3r1Td+5nznvec55D4fn3UtxvcMLxrV1chef/DaO2HGlGqFxwOwB5p453UuqdFKxK22bzzBsRcfCyWupQ03sgjRBFl0zGqK9Az+2FjG4lVsyaQMGZf6Fi3q0AbV5dUWgVjrqPzc/dgNUUoHLB7XgzegBgN9cgLZPyJ69I9DJUVNT0PJy8WHyWrKOE5mmr43JjsVgq33abBmJSU7OwWuoJN1aDZYHucYFV9fJDyWLWpJLaSdoRfgkg94RrkNJ2MvXSIveU6LZheyAIwDY78MtpMjJdfgEJDKuYNYH8M24CYNOcidiNlc7dxWWitIw8el82lfUPjyQw969AjF9WNPMWL4mWWej8L8rI9o1Nseen6NEa3NixhESfjAZq0r0f91e25TRNJfn1sykZfO2Gqihdn2DSVxddTTbUBHCc/xIvdilttJxi1BQn3R3LPJm15dELXzieW7oPoSjknXKDu10obZOaJZPJH34DNYsno3u8mAgHTEKge7xomk5xv8GE8/IBUDUNJaOgw/P2ZPTAaK5l33FPuMfKN67Fm9eHTTNuYN9xT7hu1eHGSgJz/4rdUh8FaNv9iPgCWymd9vJgI9K2CSxyOibtYCNYpmOdEW5xbxMrbNVzeiGMIIALrPaATCqppLpfW+NXRamz3deeX3pub3e7TsvunF9GTRlC83bgl6ppbJ5zM5qmo3scv8B4hsX4BVChqBT3G+xM2+hEQtUS+AW4Dvqx46unTyBYXUbFrAlomh57EXGLwzZ+xbL0VksdZY+NRtqWyzBp21Q8/2enKz1qeRELOLWsQtToNkGSXz+PksHXr1D3jxtFQ02gw1gf24igqmoHkz49LYcBYx7ls/vPddzpbbPTrj6hak4avkNtgnPRFo26j82zb0IoGv78thWU2lmh/haUlpZO6bxbUfwZCTBQdB9GUzVKJ3UNoUAZkaZaVk4bm3A8/4w/U/Xy3x3fMKE4dWS2hW2EnXoLKTECP2CHmsgfcWfCa6pZNo3KF25HKAr5I+8hRj4hFKS0qVkyGbULXVJJJZXU9unH8Kt89kSgk67kWF2Vom6RX56sQtS07A78Ku43mAqlk3qqTqQIhfXTr6W2qgIlJbPtuO4jHCjZYkF/pLmuA7/yhk/A4/VSOvMmhJLIr5iM6h+ir02QP+KOhNdUs+wxzPoKCkb9jYTVp2USWDIZYoFdUj+LksHXr1DNzU0c8JeOBZKfPjDSnVXWmbJ6FFKzuQw72IjZUNn2C9siPkefWJsg3GJQq7HSSc/bppu2VlUV1ZfibimGAyUElk5xbB3AbaFeOW0sZlOAv81YzC2XDu9Sp5O0bdS0bHpeOsU9VjL9moTVsW1EkNJGqDpmQyV2sJHAkskIVcNqaQDhrIgV3VnZCt3b4XEgWuBvW1jNtVjSxhPdAk0qqaS6Rz+GXwA1lZs78Cu+Eaer/AIQ0sZoriUcKKF6yVRsI+TWiFot9aycNhbVl0Jmbt5WGbbFIE7KBH6VPns9ek4xVmOlk5hrxy9wsvA1r051Jn+As5OQ1xehKFTMvRlpxoK0hLR/tPC/kQiChCLZpH5SJYOv3VShQJlb8B7LGhnNtdECc5uq+be650rbRk3NRPVnIqXtdimaTQHHgiZqRRFe/ggEG6h58W4yc/Pc2/uBHv0G0tzchKo6RoNFlz3i/t6oKcOTVUj5U1cCTvars0ArLS0dSAzCGmoCmJaBnlmYUKegeHxUzLsVIW2q5t+GjBqNxbYe1Ix8epx6I768XmyYeiFCSVwFykgrBSPvIbB0CnpO7zZ/sJpSUFTUtBys5joijdVu67kdbGLjo073kzQjhIt6JTzvpJJKqvsUY1g8v8CxnOnIryyE5kVoepf5tR7o1X8wzc1NFPcbTJUZpvDiSYho5sioKUPPKqRi1gT8qT5gywzTFNHheENNwHn8OH4JRaNi1gS3/KH8ycsT+AUOw3qPnsQP0zp2XMqIY3ZdMWsCek60M1uINn6lZpM3/P9RMXdikl8/k5LB1y6szkxRwYFNSul6t1sFnDoJ2zJZcd+IDoWm0jKxpMS2TISqUzDynoRhs4FFfyfv1BtJKRqIEQm7RoCbn7sBVQj2HfcE66dfy99mLN7m8y2dPRHbtpx0edszIFJb7m4pdrWtubNVZvHV0zo8p3Gn/Zb8qCGiI9sFnoxm4trmtcVOSVw1x+ZJOr+XIFRyTx2PQCBtg9gqsyrOiyeppJLqqK7wC0jgl2soShu/hC8dq6UOoerkj7zXvV1g0UMUXjwZs67cdbqHHePX+unXYrbUtY1Cc54BZn0lZksdaQV7ADvGsPhGguJrpgEkPK+O/HJ2EtpKMdrm5TrOPyL6/nTCrzhD7CS/fh4lg69dWM3NTWjpeQnpaui45QbOXDChaPS7dibBqpLoBSlBKFTOvw2ruZbYhVfz2jTyTrkRLbsnAEL3UPnC7Xgze2DGdc0I3UeksdrdMtyWJk6byy2XDqc5ZLqO9jGFAyUJq82doczcvA6dUgAVqoqqOZeCW8wqSAhAhRYNuBQVNaOHUxdn287Wg22haBqW7UwAiLlMN4ecgK4usI77x41KeuUklVScusIvAISg/3WzCVbFRgRFQEDl/Nuj/AKXYa8+4o4bU/zpVMwaj9VSn5A52hF+AVw9/JAO/AJQFHWnXuNb4lfY5aaIGkc7/yrRIDMmoeoJ/HJLP5L8+lmUDL52Q7l+WEKJDsrWUFOy2uqZGqupfvkBFF8a+efeRe4J11D5wm2k+TRqqqrR0qMXu22i5xTT88L7O0197yoS0na2KH1Oet1xrY52cGped0QIOHUWij8DofvwZjr1XqqmUdxvcHRwt42e1xs1LRtb8yIjQYQvnR/Wf8fVww8BnK2Hoj4DkoaGSSW1AxKKgh3nIB/jF8TVZDZWUTnvZvLPvYv8EXdi1pZT9cLtWK31vzp+gcONkhdud4v8rZZ6IlUbOvArFoAK3UdK4UDHlmcL/Mq/4EEqXri9A79iMyOBJMN+hJLB169Qiu7UO8WsG8Bpx44VV/ryetG6eV2bU7y03RE94Lgqa5kFVM75C4rupeb1x9qMABHuQFpF95F34th2qe+fXtuqEdvWeb36O6vJGEjqQk0IXzp6TjEFo+4jVpQbCZRQs2QyRaOnYARKqVs6ucN9SWTUGV9iR4IUXfIw0jIw6za5q+Ufpl2yQ+NTkkpqd1Bn/AIcXz2i/KpY5zQCSdmOX05xp9lQSc2SySi6l4q5E7GDTQhVBdv+xfELto9hWztn4rS5CQ71Na31Lr+EEI5ThaZT/vhoel4+DSNQ2unzSOBXlGGF
F09K4Fc4UELzcqdmN8mwHVcy+NrFFH+B1QWqEL50pw4L0KIeND1OvZ7w8kfcWoH7x41ygi+hOin7OLkDs3UvUspOLSakEabgvHvoM2gI3qivFsCmGTe4afBwF7YMVV8Km2YkjhYymmvp3W/gdt9HvLZ3lRU7r/0Yjdj/YxC75dLhBPFQPecv0W1WB+hWdD7k5hnXY0bHjQhFxTKNNl8fKV3/H6sxQMWs8U4NRnSFDomjipJKanfSjvKrubkJ1Dh+SUDT3AXkVvkVdgII2VQF8KP5JaTdgV+x4zuq7WFY/Dnx72Nzc5Pr+h9j2DWnHuzyS6hadOSS8/7E+CUUBWlG4vjl1IrF+FW18E7sljoqZt+UwC+haGR4kzY7P1bJ4GsXU3zGZOW0sU6habxL+xZuU3j+vVS9MsmplZC2k8mK+lxJ23Zmkakase02s24TVnOt4yzfUk/n/vBdV1paOjQ3gS/xo5eWN/AnS1VvT9Zp6Bhn7EjJ91+jR01lN88c73Y5lT9+Kb2umUnpY5dQteBOt/jXlW2BolJw4UMIRUNahmtjUTotOZw7qd1TO8qvAWMepX7KGPS83o7flRAQc3+3bSJVG5xATNOd8T2WGcevWsy6cjxeX+e1ZF1UrPOxvdLyOtZj7Sxti2FZPQoZMObRBH5JKamcfRMFFz7EpqeupNc1M6mYezNVC5yxRwmyTVA1Cs5zRrPF82vTM+PA69+5L3A3UDL42oWl+lKIVP3gOBtLCyVaIB6/J++eq6oIRaNy3s2oqdnOxRY/G1HV0HN7u63IxF2MTl1A9xjy7Yq1AMItXBVOl5OiOnPWAqWo/gyKLplMxfN/pmj0w46/TiSEtE2slnrM+gr3tu5Mt6SRdFJJdYlf4NjHOHMVnbrL+IYYvUc/h11Suu707bU1j7CuaFdjmMsvM4KMLqxj/Mo9fizVix5CqCqFF0+mcp7DL4hZccjofMg4fiXVLUoGX7uQYtuH8U7InoweqL4U/EQ6bZWO3UYLh8g58RoCiydRcOFDbl0X4KSVibpCx42iUPwZ5J82gaqXH3LvT1VV157BaK51V1q/Ri8YZ1SJjNsmdKImPasQoSjUvvEEdrCRitk3YbXUsfm58VgttRScfx9qRg/HYyc6BNeoKXXNHAHqqys6OFoD29V1lVRSu6raf+5jJsVmU4DHFn/c6W0aagKUfL+GnBOc2eaBxZPIO/2mKL8EFbP/3Fa/apkY1RujNgrC5Vds8djd/Kos24AZibBxxevY4RbMSIRUq5G0FF+Hc2saW5FpPZwaLBT6/fZUNI+Hwt4DUFS1k3v/cerIrza5/Iq0YgcbnPNntvFLy+7J5hnXoUWtOdrzq6EmgC3tDgxzvouS2h4lg69dSLHtw/jRGuDULsRv4yUUXm4ui5oBtt+jjxvd4/FTNf/WqHFoLYo/A6u1HlUIwssfwQ42uk7P8d474bz8bXrj7IqKFbbWBaqiTI/659g2VQvucFbdCOxIK/kj78WT14dIoARPXh82z7jBnbcGcWNObIuIa6IosW3IOuk697yY1UXV/Nt3/gtMKqmfSVIoHawlgIRuw3h+1VdXYNlOINXuntx/FY/fDRwAN7MfM07tLn5ZpknZ+m8o++QNjLpNZKVo7NlDJy8jhSuO70Xv/I72E1tSQ3OQJR+/QihismJ5I/URBV9uLwYefirZ+UV4vB2Dt64oLS2d0tkTE/gFDsMq59+GUFTsSCtFo6e4XNJze1Px3I2Ok37MiT9m6RHHL6ulHmyrw9giVdOomHcrPXawdnd3U7cEX0KIk4CpgApMl1I+0O73lwJ/B8qjh6ZJKad3x2PvDorBqC5QRQ/TdItOhaLgy+vV4fz4eoDa+0eipmW3eXapWjQNHe1eVHXyz72LilkTyD11AoFFD5I3fALVC++kV7+BbV00rz3M+naP82vMdrUvxlf86QhVQ/GkUHzh/ZQ+ez15p06getGDmI0BsEwHSlE4Of45cVu2qu4auNa8OhUZHeQrgaqFdzlt34pG4enjsSwLW9pu8Swk27Z/KiUZtvMUf03ZttUlfq2cNhZMo41fmida+J3ILyDqCN9E3vD/h7RNal68m7/NWPyj+GVEwmxa+xXrPnyVVKOWwwZm85dz9ibVPwhlO+c9dqbMND8XHDsMgMsBy7KpqG3k1U9m8+myOuplGkNOvISC3v279DgNNYEEfrTnF8DGRy9B8adjNgbaauWi7vcxfsVqWIWiufwSCAKvRgNnRaVq4d1tvmlCJX+404TQWQNAUh31o4MvIYQKPAYcD5QBK4QQi6SUa9qdOl9KOe7HPt7uqBiMVk4biyenGDuaYjfrymmtWE+4sRqrRd+iEZ7wpFD+j8udMRq2zeZno4XlioriTSV/xB1RoEmEqqHnFKOn5bjA/LVdPFtr2Y4Hf/nGtSgZBQjN444kUTwpVM6/1VkNKqqblpdmxPlicOtNBEL3snnmjc4w3GAjgOO8LQRaRgFmQwWevD5semacOyw4NiQ4pmTb9s5XkmE7V/HXVMPUK9GyCpBSuvwCJyNzy6XDOw2IhO5z+RU7N7DooUR+uUGZRM/rjTQj2NEM9I7wK9TazFfvvExw3Yf8YZ8iJl6yN6n+zue9dodUVaG4RxZjTj6AMUB9UyvPvTmT5f+qJWfwb9jn2LPxpaS552+JYdI2t8ov5yQLq6kGFNUtpgcS+CUUpQO/YjsjBRfcn8AvcIrwk/zqmroj83UI8L2Ucj2AEGIecAbQHlxJdYM0j9cd8SMUHX9+H7wZPdwRGZ2pcNR9bHpmHIUXT8JqrHa2xSwThKBy/m2U/+MKAKrm34qW03El+mvT1mAcv2qEqMmEGUFazrDdvBPHUjHvVvoMHsIP679z6jdiXVdmxKk1iX6x5J44Di27J1ZDFYElk0BK9Nxe0fOdADpWj2HFTQ5I6idXkmE/kVRVQ48xLMovAE9GHgPGPNopw/JOvp7qRQ9GO409mLXlUXf7Nn7Fjxv6MarZXMrqV2fQQ9RzyWGDOPy0E9umXvyEykpP4fozD2GcZfP1D5U8Mft2mvzFHHTmVaRsJZu0LX6B00DVZ8AeDr8UBWzh2nTE+BWp2kDuidegZRe7/Op92VRKnrkOLSOf9vxKquvqjuCrGIh3bCsDDu3kvHOEEEcC3wE3Sik7d3lLaqcqlr43asvQMguctLGiOp16z91Iwbl3dqiu2J0V8x7SNL3NEygvn4nT5vKnUw5ygmBw6iSEguJNdUcxRZrroqtKp1NIWmE3UItJ0T3RFX3yXf8ZlWTYriIp0bJ7JvDLDjZRPPZZEDg1S2aky1eTEQ7z0b+fomf4Bx45/1CyM1J2ytPvqlRVYZ8BRTw2togNm2q4d8ZEfEOOZb9jz9quoHBb/IJoN+R28Ku1Yn1CSQWQUISfVNfUHcFXZ5+A9s30rwBzpZRhIcSfgOeAYzu9MyGuAq4CuGjCvRx5+qhueIq/DsXMSWPGeVZzHZ70HFTflkEhbcvZ148p7oJ1W5BtC7OmHKulnuo5f3EfC3bvVc3WOqPuHzcKaZlsnnE9kPiBl7ZJmk+jvsn
CFgpKSiaFF9zP5uf/7JjatititVrqqJx/G97MHlv9Wya109RtDEvya+uKZ5gdbMKTnuMe70yR5jq35gjowC9nm8yxQxCIBIZtr+np1x8up+rDf/GXsw9g2IBjduyF/QTq3zOX6defxOufruPph69jyPCr6LXHsC2ev83Ozi3xy+qcX4BjTxTzZWtXhL9pxg1JfnVB3RF8lQG9437uBcSPfEdKWRP34z+BB9mCpJRPAU8B/PPd9UlHpDjFjD/Loy7zpU+PI3f4ePdYXaCKWy4dTlNtdVv63rYILJ7keE7Vxf1ZpMSoKcVqqUfLLMCf3wdPeg77jnvCPWV336/fWmfULZcOR9G8FF061TkgbSQgTYPyJ51tXCkU1JQMF1gdvuEVFT23F2pqNmZzTcJ7n9RPqm5jWJJfW1c8w6pefsjlV+xYXaAKTREue6QRJrB4EnawEbO2vC34cvlVi7QtPFmFaB5vAsO2xa/W5iben3k/ZwxNZ9T4k39UAf1PJSEEJx48iCOH9WHKv+ezYu2XHHTSqE6tKrbZ2alqHfgFUBbtPG3Prw6K8iumJL+6pu4IvlYAg4UQ/XE6gc4HLog/QQhRJKXcHP3xdODrbnjc3UbtiyvrAlXoaTkIzZtgO6Gn5RDEg2nH2UjoHgrO/Aub596C3qMvQihIaUe3vxyQWQ2V/DDtEoS0Ex5nV+lmbKqvZd7f/8yom/6PtMzsH3Vf2zsn0pGk7LHR7v/BKQYWiuI2SJimgepJYfNzN2A21VD+xKXueUBbrYptduFxk+pmJRm2E9XZNVUXqEJN79HBNkfaFiZtgUSMX1UvPwSq5hZ429GuYcWfgdVST/lTVwIkMGxr187Glf+j5J15PHHF78jNTO2Ol7nDCtQ388cHnuepiRdv93Pxez1MHHkY//liHU8+fRdHXn47qqZ1jV+W2YFfAESL9tvzC8BqClD++OgEfgFIy0jyq4sSUv74xZkQ4hRgCk6b9jNSyr8JIe4GPpFSLhJC3I8DLBOoBcZKKb/Z1v0mV46dK956Qk/LcY+rvhSsUCu5w8e7+/srp42l56VT2DD1ArT0tvllsXESVlMNfQcP+UV0NJav/5ZQa/MWf+/1p9Fr4J4dgq2lz07ihzeept/xYzj5svFbvH136pZLhyd09cSykRsfudA1jgw3ViOEQv7IexHCqZswasrQ8/tT8dyNFI5uG8xdNu0S+g7c4xfxd/g5deWRA36W4redwbAkv7as+8eNonTjugR+gbPNWHT+PR34VfbcBMzGKtRUZ3EV45ei+5DNNUx75cPtfuzvP32H9A1vcOv5h3fTq+m64gOu5xa/zyuvv0ODTOGNR2/scjD4TUk1t8z7jAPOvZ6C3tvvsRXPsBi/ymdPxKgtx5Oek+TXDmhozwwOH5S3XQzrFp8vKeVSYGm7Y7fH/X8iMLE7Hmt3U3vDQVs6k+eFBCkg0lQL0sKf35ehYybxyX0jqPz3g1RGb28HGyl9xjHzLLro724BZjhQQnG/wayffu3PcsFUbyqhYt1XVHz1AR+/9yYnH3M4Q3umM6hwyyul9aXNrHk/zLsrVlFdtp6Xpt3BCaPH8/Wb85l8tM74N+dxxNmX/ejs146qPGpqmH2KEwBKyyCwZDJaViFmfYWbupdmBGmbzoy6OHU2Ly6pn0ZJhu0ctffNizEMKdv4BS7DaKym8t8PUiWcDjyXX4qG4s9wGRbjF3StPGLtR2+Qu/k9/tJNgdeOZK0AZi75gLqKUh5b8DbvfPwl9xyXwqULanh84dvcdsXwbd9BnPbq04P54//Adf+chjx+LIX99ujqywCIBl5l5J97lzuPNsmvnaekw/0vXPEeOZ9PvZKeF/0fQtMxasraxgPNmkC4ucGZWyYUCi+ehKLpIBTXeb3ssdFsmjkeLTpmw2iuJZyX/5OmhRvrAnz12hyaN33HN6u+4O6rTmNDThOBlCCDM0zGnfEboCPQ4n+WUvLlh+8w58oiRsx5n4f/9Ba9UsJsbk7hhOJW3vvXsz9Z9qu97EgramoWel5vKubejB1ucQpRnxzjbkdK20JNz0Pxp6Pn9HJLvYWi0FCTHC2U1K9L7QdAxxgWP94M2hgW4xeAontdfm16ZhyqP8NlWIxfsP1bW19/8Cq9az7ixhGdNbJuv+J5FAuinlv8PuMvPKHTc6SUHXi2+D8ruO+kbE5/5k2u+30WWUqYMQd6eG7ZB1w94pguZ780TeXRq47lsilTyLryoQRPsO2VHWlFTctByy6mauGd28cvAOHwqy5QtUWvyaQ6Khl8/QIVv1qMn+VotTYCAiGcffb4sUFC1RLqJ2IDOUQ0AJO2hQw1kRYDVt7An+wiqa+p4tOX/kFvTzP3nrYf819XqVnTzEcrv+P5pf9j4SUF3PzmCkYPd+ov2gMt/meA4YMU9sz30jvVoKUpxJOn+gnb8Nr3Fv97eQb7HHEKy579e7fUgG1JndXhSUl09BDISCtFlzzs/N8y3GLhyvm3kXfy9aBq0YG1UQnFNYVMKqldXfGlEfHz/2IMgzZ+xSwTHJNUor93dmxj/LJa6tE03WVYV/n19XuvMKD5c8ad9Zsf/dpiPHp84du89v7npAqLl978yOVX/DkxZrXn2fBBCq9/20K6x+aIohCZXsHo/XTmrQ7y+MK3GXvO0V3OqKmqwn0XH87/e+Yufj/mbry+rU9ZjGdYG7/SEWLH+KWnZSezX11QMvj6Baq9y7o3rw9mJEzZk2MAiVunFze3y2ysZsPUC53/N1S69yWE4haptn+MmKP0zgrCmupr+WTho/TxtfDIyAPJz053V31PnJ3Hif/8gJ4pFu+ta+Ho3hrHj3sU2ph4AAAgAElEQVSYeX+7yv392MUrGH7E/rz81sfccrjK3cs/RAjB9DPSOG16CWurQly2v06WX9ArQ/D8Oalc+lILz958MR47xKPXj+DaqQt3SgDW/j2L1bCAY1YYG/TrDNHF6dJSEjuShFDcbiE1NQu7pa7bn2dSSf0cijEsnl8SKI8yDHD55fBMOuO6ALOhwr2fGL+EoqL6UjCaa937394RNuu//B/9Gj5l3Jk/PvCK59d5sz8gWwtjCEj3tnL8uId5Y9qNSCndc656+WMMy+bOw1X+9vbHDD9if/715kekyhY21jjZrmwfZPkFugIXDNN5dtkHtIYMStav5fEX3+a2y7d/G7JXfjYPX3QA45+9h2PH3rfVc+PfszZ+RdW+FjyeX/F2H1GGuWOGktpuJYOvXUTbqtwVikrByL9RveihuHS+wKwrxwiUgoDC8+916yRi2hl2Ek31tTw18TIO2auYaVceTU5GKt/+UMmhl93PGUftx0HZzaTqKaSpJg/+wcfNbzXQFBGowmb8lPkMH6QwIEfnoOxmxk+Zz1HFEfqmWxyWb7A6AKOeb8CjWBiWZMEakyc+MfDrkKIrVLXYIBqYeFw6E5f9wGszp3LOtXd2+2uEjvUsMQloA1TsX0V1g+Cqhc7ziXU5EhtJJEjOREvqV6mt8su5YNAy8pCWiZZZSCx338YvgRVq7TLDajaXUv3+PO6++rguPd
/2pQ/f/lDJSddPdfnVNyud3xZE+G+JxROn+jl7fivFGSEef/FtUr0el2EHZjfydUDSN93DUT0j3PDwXMqrailIhbBp88JXBo9+FEFTINsvqA1Kwha8+Mb7TD3Bw8TX/8fV53RtG7IoL5MLD8nntTdeYN/jz9viedvkV1sCEqCNXwvucI/FM8xSVOqakwzbXiWDr11YQlFo360qVMctXWgaQigIRcOf3wdnfN3OV0NNNQ9ccSK5eohBmT0ZfdcM6ptb+eLbUgrSYNFbHzHnLC+Pv1vBhfvqFKULMjySsGnzuz4ar3zzPff8tpjV6zexfw+LJ5espSQLPi3RuPIAnWc/DZKiC+491stNb4TQhWRAtsq6Wpu6oEWmT3DaHirH9ZFM+J2XR5bNZ8AeQzjgxJHd/lrb17OsnDYWqfvZNHM8drCpLS1vmY4xoZQIRaHnVU/hfLFsIrB4EoUXT8asKwckfQbtDSQ91pL69alDMiXGr3a/UHQPthGOTn9w+OVJ63r22jJNPlkwhWf/eCS6tv38C9Q3c8J1U/DbLRxz9SQy0/wd+FVaWUuGB04apPHGOoM+mYJDi1We+ddb5OdkMPU4myWf1vPCqjCKkIxbajLhcD+PfbCOVF1w9cEOv5rDDr/W1ljUhwS5fkFFs82J/eGYAR6O2xDscvYLYPihg1n+5JvUVPyO3MLiTs/ZJr9iBfW25TRIxPj1x+nu3yzGsLzhE1A0DwJ7hxohdkclg69fuGIuxZZpYhsRx8AzyqqYx4ptRJx9+ugKpGJWW8G51VqPoqhIM4LaiRFfd8oyTWbefgUpdhNPn5XHla98QKZmUNkYIS9VEDIlZ+4laDbgyRVBFozwc+3SECUNNg/+wcfVS0OcNlhl7ifVjN5XY8YXBoNyFDbW26R6JO+XWigCzt1bZ1WlTc80wbp6yV3H6Nz/foRAi6QoTeGcIRpexWLswT7+9XULpe/Nx+P1MvToM3fq649ZfchQk5Pwsi2na0iIBDNCEFQtvAs72IgdbKRyzk1Iy3HxbkrLdI0ok0pqV1c8v4AEhok4U1PbiKDnFiOEyqZn2maXx/glpO2YfnaBYd/8bxljjuhDeqqvS8/58YXv0FhXw+/39PHehjrK6msS+KWpgqOn19IjBbL8KhXNNv88zc+E10NoQuIzG0nXvNy6IkJhuqA1Ahvrbd5aH0HdIr8kthT8+XCd298Oc97eGqoiuPwADxcu6nr2C+C283/L2JlPcPzYe7fr/Pb8UhWBZZoomgfbttzaPEXzUDHnr0gjhLRM7GAjgcWTEKqGUDSKr5nWpee5uyoZfP3CVdh7ABWl67FM0/lwC8XtkHOM7mT0Q68QWDrFKbJXVFA08offQMW8W8nOy6cuUIVlWVSUrk9wPu4umUaENx6fSEvVBq4+xItmBdGtELce5eXu/xhICevqJP/62mT2SoM9chXu+k+YxggUZyh8U2PTMw0WrzUBk6c/DZPiESw418/IhUEuHqZx97thUnXBxftpXLM0zGG9NRAWH5VbHNVX5c31krOHaAzMUahplRSkwYXDdP755feUbp5ES2MDh5w+epuvZUcVC5rWT7+WtLR0ShfeiZ6WQ6SpJs6fyCmqt8MtFF48CbOh0h22rQhBVXQ0SlJJ/RoUzy8ggWExfoFwiu0ty7HRUVSEx0/OH/5I9cI7yY42CdUFqrb7cZvqa2le9TonXnvCtk+OU6C+mbmvfcCoYTrL14W452gPd70bhjh+zVll0DdTIWhKWg1JUZrgwCKVHD+UNEjWBE2OfMZM4Fe2VzLzi8hW+fVVlc23NTYXDNPJ9Ao2NxjskadzXO9W/u/51/mutLJLBfi5mamctlcKn324nL1/+4dtnt+eX83LH3G9JGMMk7aNlDZ2JEjRJQ87xfjRzlUhBBWzJnTp/d6dlQy+foHqrJNO+NLRMgsoGNVWRFn+j8sRiup22IGDMsXjxw41I1HQ03Jct2JvXh931ld3yoiEeXf6HfRVAqxXDcYckMZjHwcZPlhl3wLBOXvrfFhqkuNXqQtabG4R3HyEl+teDfG347w8+lGEl742OHaAzhvrTTyq5LuAZMQQnV4ZChcN0/n3NybZPsFJgzQufznESQM1/lNiMeUkH9csDXHv0V5eXGPw/EqDmV8aVLdIvBrkpWqEQzZpSojXZk5B2gaHnjmm29+DmFZPn0Ao+iWhCAWjuRYpbXJPvg6EQmCxAzi3ZNW2qJx7M3YkiACsljo+mzIGu7Uh2bad1C6r9p10wufYQei5vcg/9y7AsZewWupQUzMBgYxNiQDMmlJAuvwCZ1vMsqz2D9WpPvn3Uzww8pDtGj4dr8cXvoNXhvEoKsf1Vzmop8o5Q3Q+LGvjV2Wr4K5jvFy7NMQtR3h4fEWEj8st1tVJBuWo/FBv0xBO5NeKTRZhy+6UX9cuDXHn0V4WfRvCtKExLJn+mYEEijI0msM2dV+8y9AiH8df+3CXjFgv+sO+LH3k9e0KviCRXxDHMNsi95QbCCyeFJ2OAggwG6rAtqhacAcyEsRqbeCzKWMQQmAHG5MM24qSwdcvUO0/rLdcOpyUE28kEgo5g5mByvm3EBuvEd9FFwu87GAjFXNvxt8j2k0XHWgb748D3TP+YcWiZxhzWA+ufehVLh/mPL831xs8eJyXvBTBxftqLPnOpCVi830d/OkgnX5ZChfuq7O6yuaw3ipfV1t8XG4x5UQfIxe0kuET3PBbneaIZMyBOrNWGgQNyQurDbyaoCEsOX1Pjd8Wqxw/QOPtH0xG7qOT7hGM3Efn8Y8jPL/KwFIUVGFx51FebnytmbX/eYm++x+5w0aE21JnRcGrp0+gcsEdeDN6YAebsBqqkHFpfGmEKBo9BSGE62sUqdpA8zuP75TnmFRSO1vxDHP5FQ6hZRe7DLNa6gHR1nASleLxYyGonH87KQV93eOqL4WKebcm8As6Msw0DdJCFfQp2PLQ6c4Uy3qN3FPjjXURHvyDlxy/YPR+Gq98ZxIy2vjVN9Ph15pqm9/30bj1LacW9YlTfZw1v4Vs0cavPx2sM3eGQaBVMu8rA7+eyK8/DNB4Z6PJmXtpvL3BZM45KTyxIsLsVQZljRYpPg8Zngi3/F7n6le6ZsQqhGCffJXKku8p6DNom+dvqalhxT1nUb/sEZdf2BbSNAEJioqMhCi6dKrLL2lGsBsraV7+SJf+BruTksHXT6wtdZjEANL+dzFHaG90xWcEfgDADjZRcN7daDkOzGL1E5ufuwGhOm7Qmqa7qeT4lHKHAas/Qps3fkuv8AZWfhfGS4T5qyVPrAgxfLBKcabznHL8CmfsqTHjizC5fsEZe2oIAVf/RmfkghB//Z2Hxd+ZXLa/zoFFKtl+wVlDdHpnKlS1SJojkhF76yxcE8GvCU7dQ2PeVyavXujHknDRvhpnzA0SNCWmDU9/bqArMCBbobTR4MJhGoNyFC7dX+f5NSV8Pu9B/nDj4+he74967Z3NUTOaazvUpQwdM4lPHxjJvuOeYPX0CdQtnewUtNZXOHM2LdNxjQawLSKBErbd35pUUj+PtsSwp
tpq0nN6dDheW7XZ4ZckcTg2UHDe3eg9+oFo8/va/NwNUZ6JhPrHoWMmbRe/vnxjARcfue1Ao71mLvkALxFmfmk6/MpQsCRk+RRO20Nj9so2filx/Lr59x5mroww7jc6A3MU0nTByXs7Wa/qVuc6PnuIzr++NoiYkjP31Ji3uo1fF++nc8bcVupCEkXAcTNb8GuC/tkK6+tswOC6QzwU+g2uONDDzC4asV57+kGMmz2HgstvTzi+vfwCpwmiPb+shkqwTFB1pG068zZj/LJNPN6u1drtbkoGXz+x2neYxBS7CGK/Wz19AlaoFRsFhKTqlUlIKVG8qRTGth67mFLvbkkp+eqlacwdfwJn3fQ4puJjQ3UTHk1h/mqTF1ab7rl2tDvm2kN0Uj2CHL+gZ7pg+J4a75WapHoEp++p0RSxaQzDom9NFn9nYkuoaZVk+QQtEdCE5NnPnZqx2atMLt1foArBiL11Xvw6gmXDPvmCiC248ygvl/w7yEkDVTwKXHGAzoI1QYp9ET5Z9DSHnXv1j3r9naXTb7l0eIeautXTJ2DbFp88cD7SCLdlKi0TQZslhaJ5QFHRswqxGysxSSqpX562xLBPHxjZgV/gNMZVvTIJpER4/BRe8EDbjXYCwxrWfcYRpx3T5du989l3mIqP6pZG5n5lb5NfhWmC0/Z0su75qQpn7KVTH7IJBGHxdyZL1rbxSwL9swQVzTBrpUH/bIXnV5lctr9AFXDmXhpL1ppELIm0YUCOwj3HeBm1MIgqLE4Y4CPH7+wizF8d6lL2KyPVT4pRixEJo3vaFpw7wi9k1MdQUZFWdPvR7QATrqVOpGoDhb0HsH773/7dTsng6xcqK9RKz0unEKwqQSKdVK5tUzFzvFu0Ha/OjFd3ttZ99i4jf9sXVVVYNGkcZ930GF6rBdufTb+UMA0tIUCypsokRRf4NFxfrpgDvy0hbEou3k8n0Co5f2GQyw/UufJADzWtklTdgRVAQ1jy2WYTS8LEI7zcuCzE0rUG9SHI9AlaDDAsWFtr88eDvQzMUbhgH53PKywG5Cjk+gUjh2rM+t+XHHJ4OuFgK15/yk55b+K/fCJNtagpjgmhZZn0vvZ5KubeTGCJM5jWDjY6GQFFdacXJJXUrqwYvwBaK9bhyR+AlDabn7sRKe1OEZXIMHYoMKuvqWJw7o59rcUYppvNmFKhR4pjdPV1tUWK1pFf4DAsaEou3lenplUy6sUglx3g8Ks+JLFtmLXK4LuAxX3H+Zi7ymD2KoNbjvBy7ashXvnWwKMKqlslA7IE5U3QbEn+0F/joCKVs4ZorK+zSdEh1+8YNl+wj8azXcx+7dM7m6rS9RQPHLJd52+JX4o/AxlpRXhSXH6hqC7DYlvKSW1byeBrF5AQAiNQikRitdRSMXN8tF7CUdsKxLGbsFrqQQikonRIK3fnLMfST9/k1tH7Eahv5tybn2LdxhLmj0hh5IIavBkaTw73cs78FhQgP1Uw6QQfaR6BEPCnxUFsGxaO9DNmUYi3NljMWWXQK0Ph8Y8j/PNTp0NSEZCbIshPEdx+lJcFawyuPtjDwGyFUcN01lRb7Feg8kGZxRG9dRZ9ZyCl4KSBGvUhyUmDNC5+Kcg/P43g0QSpHoFmWxToQVa9uZCDh1/Sbe9HvOK/fIJVJaiZ+YBg01NjsM0I+SPuwKzbBEBg6RQ34yU0j7OyTCqpX4skLr/sljo2P3djW8aXRH6BwzDHvNPoMr++++8Sxh8xeKvntFfMUPXKM4/k06/WMu1kZ2F3+8kpjHm5FSEl+anKdvPriRUOv6IvHQUYlKOQ5ROYUnLmXhpH91M5ay+NNdUWR/TRmLfa4O8n+Jj3lcHcVQZH9lWpbpGcu7fOJS8FOXJGCzk+yPApSCnwqmqHeZJb01mHDeKudz/Y7uBrS/xybHEsCuL4pWX3pGL2TQ7DEix1ktqaksHXLiAtswDd48WMhFH8GeQNn0DF3IkgJWZtOTWvP+ZsZ+EUsQpFRUoLTRHdWt8Vr6b6WvqlBElP9fHP2a/zQ0kJZ+2pkapLzh2qs7bGItcPp+2ps2B1hLOH6PTNUmgISWwJv+mpsqrKoi4Ifz/eR01Qcu2rIe48ysu1yxy/nJAJ5+ytc/kBHhQBT38WIS9FYfT+OpoiOH6AxuyVBrl+Qdh0gjRdEfyuj1OrgQ39sxXO3Vvn000m6+sl9SEbn0flm42byWRVt78vsToKo7k2zt9IkrDUlxIR3WIUQkGoGpVzbwZo+9upOsV9+3f780sqqZ9cQuDPd9zR1ZRMck+5ARTV+cxLSeXcm5Fm2D3dZZhtdZ1f9aUM6XdQl24Sm8M49oGZnLOXxoBshQv21XlrvbFFfh1SrLKq0qK2E35dtyxEU1iSmyKoD0nO3UdnzAEefqi3WbrWYs45fnRVcOVBHk6Z3crnmyMMyVd5e4PJiCFOZ7dpQ11Isn+hyqhhOi+uMagPSVpNG4mgZ14G73z23XYHX0W5GYQrVm7zvG3xKzYKKp5fZkOlyzARK6mwLcyCom5d7P/alAy+fuGS0dlnRqQNTtI2kJZJ1cI73S3IgpH3OL9UNPSsAozacqoX3rnTWn2/fW8RNxwzhEB9M/Nf+4A8P4wcqiElnL6Hxl+Wm6yvsxn3G53nv4wwa6XB7JUGdSGJR4UWw9luPH5WC4pwtgsv2ldnaL7CZfvrrCi3WFll89I3JgvXmKTokqAB1xzipTBVQQioSxWcOljlPz/YTD7Ry9/ei3DWXhrPfWnw1oZWpHQCOH80E37mXjolLV6OPeZoxl94Arc99x+a6mtIz8rttvcl9l7fculwivsNpuT7r0HRohkto8P5em4vCs6/n8o5N6EKwb7jnuj2poikkvq5ZEbCQBu/3K1F2wS7jWEuv3C8wLTMAsr+cXmX5s9KKYk01XTp+cVmNd5yTBqXz6/gqoNSMGy4Yn+dC18KMfVE7xb5ZViSY2a0UJAmCLRIRg3TGZSrcN2hHv5bYrJvgcrTnxm8+LXJ0rUmti05Yy+n01sTsGeuwsmDNF5bZ/LoyT5GLmzlqH4aZ+ypMeKFVrwqmBJUBTQhyPILzt43ix4ZPig+aLsDLwBFUUhP2fqgbdg2vwQiYcdYy+mJNE2KLp5E2WOjOXjifKD7G7t+jeqW4EsIcRIwFVCB6VLKB9r93gvMBA4CaoCRUsqN3fHYu5o66zCJHYe2wvvYygPLcNK70RWFHWykasGd6LnFFF08iUighJolk9Hz+mLUlkVdop2xQnpazk6bMt8U2Exx3mCm//s9MmQzx++t0z9boTYoGZSjcOZeOou+Nfm62iLLB/VBm9cuSmX+GoMNdRavfGtxUJFCYwTSPJL1dTDhMA+9MhT+eJDO7FUGv++t8NYGi57pTjHri2tMFqw2eOkbA4FTA1bb6kCvd6bCAYUKL31joqlwbH+FN9ZZDMhWaI5IeqYLFq42kMJCia4YD9+7mDdWf8yQ3528U96jeAmhdFqJFzOatJrrMG2LFfeNQCAYd9pvyczNS85H+4mUZNj2a0sM0xTRgV9WdMFh1DvDsmNu6FZLPXr+
AApH3cemZ8bhye+PtO0ow3oDIITKgDGPdmlMTVFW1zqYZy75gOGDFN5YXctFw3RUBTK8gtwUwfA9NC5fFCQvBTI9kiy/wgmDVOqDkpe+MRmULTCl4Jh+KsvWWZwzRGdAtsLF+2q8/K3Jsu9NDu8t+KhcsmhUChe+GGTJWpN3NrYQm3ZU0WRz1hAdAZwySGfEC60oQuDVwJLQM10hbEp6pCqoQvLk/+oZ0DObnpXbn/WKKdjagm1ZKDsw6cTlV/syvGiNsW2EkbbJivtGEDstOeNx6/rRwZdwhgY+BhwPlAErhBCLpJRr4k67AqiTUg4SQpwPPAh0/7C9XUDb+yG8f9wompc/Qk3VZrT0PPe4mpEPtknuydd3ejuzdpNT0GpbRJpq3UGn3X0BZIkWIobFy299jDQt5n0lmbPKoLJFuqPaDFvSJ0OhKQKFaQqHP9NCmgeCBvTJEnxeafOPU338+Y0wZw/RyPYrhC3HmuK8vXWe+jSCX4dj+mv8t8SiLiTxaZDmcQjgiYLy3L11+mcpjNpH57MKmyG5grc2muSlKNx5tJcH/huh1ZB4NLBUL8/efhkARwztzcKX13Xbe7IlCaFE268tpG1R/o8r3FZ6xZ8OCKRtUTDyHkTU80jaJqqmUTLnZq459WCyehQm3GcSaN2nJMO6pu353G2JX3pOL3KOH0vl/FvaurbjZZlRqxWQtsXKaWMxmmt3SgY/lvWaeWYKl8w2qQ1KnvnCcBuBnHJ7iV8XznDsLMkHpQaKwB17tleeYMEag6sO9JCXKrBtyPAqnDJY4411Jm+ut7hkfye4yk0RVFZLbClJ8wgsW+LRBCOH6igCxh+m88p3BqftoTH3K6ec4u5jnPqz+4/zMuH1EGkeQWF+nsuwrmjf4lQaaqvJbseSbSmeX2ZjAGmZlD8+mnh+CdXZJi668O9AG78s06Rk/m1uIBZTkl/dk/k6BPheSrkeQAgxDzgDiAfXGcCd0f8vBKYJIYRsPxU6KWDLPjqKJ4XcE6+h6t/3g2ViBEqpWToVq7nWGeJsW0gsJ0umaCj+dPKGT0DVNErn3dqtAFNVhZlLPuCo4gh/3C+VbJ9EU+DRjyI8vzLCYb01PttsccOhHm59O8yfD/cw7tUQ9UEHQof3UlmxyWZ1tU2qDnO/Mpm7qtlZEUqwcZqdBmarvLXBpDkiCZnSKTbVoD4o0VWnRTvH7wRjxRkKBxYqLFhjoCmC84Zq7F/odAy9X2KS6VNZE4jw+MK3+WJtGZNvOK9b3ovOFMsOtP/y8eT1JRIocX3YTNNAqCpCUahZNo2iSxxfI2lG0D1elJRMNE3v0NqfHFrbrUoy7CeUUBQnyxUoJfDqVOxgoxtwtZ0kUFOzyD5lPNI2Kd0JJRSxrFeotZEZZ/rJTxE88nGED0pN+mcp/K/M4o6jfNzxThjLlqR6wK85Mw8n/t7L9ctCKEIgpeDxTwwe/6StScgGIqZEKLB8vcVbG4I0hW1MG7yqQnWLs315xl4a6V6niD8vReG8oTrPfB7BowrO2NPZorxgX533Sy3OGqKzbK3JuvUbu+x0D6CpCuFtnwZsjV99sCOtIFRUbJdfAELRqHnjCQpH3efyy4g41jpJfnVUdwRfxUBp3M9lwKFbOkdKaQohGoBcIND+zoQQVwFXAVw04V6OPH1UNzzFXUvxPjra91+7xyvn30bF3IlI2yawZDJC8yLNMEpqNnpOL7drSKg6Rm0ZQtXQc4rRPd6dsgX5zmff8cU3LTz1YaIjVciUlDQYjgN0wGbUMJ2vqmz2yFH4vtbmhIEqX1VJJp/g47plIR44zscNr4eobZV4gRQdGsNw7t4aNx7mpaJJcvs7YU4ZrKAKKExXaDUk75dYLP7O8QNL8wjXUydsQaZX8mGZxdiD4byhGi+uMShvsgmbMH/ZB2T7YM6yD8G7fd0/XVXsS+Lq4YfQ+7KplM+eiB1pdcAF5J063okupURETQpjo4eS+snVbQxL8stRjGHa9187n28zAkK4DAOoXvQgdrDJ5Rc4nY8uvxQVPa+380W+k/hVVhHioeX1CcdDpuQ9YTFqmM7QfJWiNEGGVyViQY5PcMwAjWyfIMMruPtoL39ZHqKkwSbbLyhrlGT7BS0RJ0t//j4eLj9Ax7Bh1MJWxhzooSEs8arwcbnDrxdWm+SlCCdok2DYzrClj8othu+hOfVn/3LMXB/72KYwXVBb1zWn+66qPb8Al2Fmo/ORzx15D+5sTkVDWgY1y5KO9tur7gi+OjNjab8a3J5znINSPgU8BfDPd9cnV5VRefL7o2XkUTR6CpFACXpOLzbPuA5N05G6n4qZNyLt6NwzRcVuqUPL2bltv4smjWPy7NdZ/ubbDMk2uOdYLxOXh5izyiDNKziuv8bDH0Z49GQvY5eEmXayj3MXBgE4bU+Ng4tVju2v8dZGk5FDdWZ8ESFsQp3pdCm+uMbkvRKbymabs4dofL7Z5t5jvdz6VpjZZ/sYMURHAjcsC/F/J3iZuDzM73qrvLDaoG+W0/E4Z5XBdYd6OGuIzgelJs0RMOwgh/fN4NX3PqXP7/rt1PdISNsZ69RUS8HIewHnS0fLKUYRgkhtWfRM56NeMeev2BHnPRI4NTIRBKunT0hw+06qW9VtDEvyq6OktEHV8OT1SWRYViHlT16J6k2lYtZ4x8DTtn52fi1cY6AogvP21rFsSWWLRACPneLnvIWtnDDAKZK/YB/dqQ3bU2f5OoNBOQqvrbOoC0r6ZimUNtgs+97k+VUGhiUpSlc4dQ+Va18NM/NMH+cOdbzBblgWYvEFKZQ22NzxTpjDemm8V2Jh2vDaOpObj/Byxl4aKzZbXLSvzodlFmfv7eG5Lnp9bahsopdv20X38YrxC+jAsJTCAUTCIcy6TUjbWYBLKV2GOdu2bdvHqi8lybA4dUfwVQb0jvu5F7BpC+eUCSE0IBOo7YbH/lWronQ9sQIqadtYzXVsmnE9RIMsq6UeC2dEhyNnFaLlFFMx+ybyR9yBlDLacdS9qm0KYZoWr330Nd9UBLnvaD+2DUPyFBQFzh+q8+lmi9P20Hj1e2cFV5ShcMEwnYVrDK491MMlLwW57lAPo18KErElaR6niyjHBz0JBsMAACAASURBVPcf5+XPb4S571gvN78VxqcJhu+p8kGpyfA9NQrSFJoNG8uG/QtVTpzVSp8she9rTWxgXa3k0VOc4d2T/hdGEQLDkigK9EhR+OiHVo4cmMHyr1dxVLe/O21SVTU6XNuietGDgBNQWY1ViMwCZ4vViiAQzt+zpT6h8wtFRSCoWzp5Jz7L3V5Jhu0sSdupHFe1ThkmbYvs48YkhLHx/LKN7mdXvNrz65BilbmrDEbtrTEoV2HWlwaFaYLf9tLITxNcMEzn3RKLdzea5Kc5o84O66Xy5CcRmiMWPg1MG+45xsvNb4b5x3Afz34ecYO1V9danDrY4ZcedGwrjumvceSMFloikuIMhc8rLCRw/3Eerl8W4h+fRJyFmiXxagJbSl44z8e8r5q7lP0KKimkpmd26f3p1X8wZRvWIoXSgWGhQBm2bSO
l7fq2WU01zndSlGF6Xl+MwA+kFA5wg7ikHHVH8LUCGCyE6A+UA+cDF7Q7ZxEwGvgfMAJ4K1krsW1ZloUntxcRdx6apGj0FGerSkCkeiM1rz6CUHX8+X0wImG3q0gIgaI7nT87w7TTXzCAptYwv99vIHtppQzKcTodH/04QpZX8I9PI0gJHhV8mmDmmX6qWyTHD3A6gZ7+zKCmVbJ0rTNQdkO9TVPYCcBy/E791sihGuNeDXLOEJ0Fa0wyfVDVIlGFYOqHba/JlpJUj/MYF/4rSKvhzILsn6Vw9hCN51caaApkp6ik6pLemU7G7ZPKELW162huqCMtM7vb3yOA9JweDBjzKOUb1+LNc7yOSp+9PsHFWyDQe/RFTXcsL/Q8Z5iwUb0RpxjfINJUy8ppY9373VlFyLupkgzbWRJK2zitdgyTlkHFrAl48gdgBErRsgp+Mn7F1J5fM74w0FV47kuDF1YbBE2nqeeGQx3T02P7aVzy7yB9MqApLJi9yuCCYTojh+q8+r1Blk9QnC4cE+h9dOauMnj3B5OgCXO+iiTwK/7DEzYlGT7BM6f7OXNeKycOVNkjV+HMvTQWfWsSNsGvC/wa7NVDJy9V4cJhOs90IftlmF0fWDZx2lxuuXR4pwyLdWoLoTjzOZEJDDOqNzq1yNImWFWSwLAkv7oh+IrWP4wDXsNp035GSrlaCHE38ImUchHwNDBLCPE9zmrx/B/7uLuLNI+XCLhDZ4UQSKQDtU4khIJZW47VXMvm55yVhrRMvGmZQPdBLK//UJZ/8SEvvv0ZrY02/y0NUVpv4NcFlwzT+W+Zxe97qTyyIsL5+2gc2FNhU6PElyY4rr+zNTj9NL9jSBiRaAK8Kkw7xc+974VBwhl76Tz7hTOOozjd6S4SAqae7OW2t8OELcl9x/kYtzTE/2fvzMOjKs/+/3nOMslk30jCKiC4r9VS27d77UtFXOu+IFraWkWh8qvWXVtbrS0qinUpreCGoFZLAbW1rW+tvq1aXNG+omFLQjKZ7PvMOef5/fHMOTOThSQmRMTnc11cJGfOzJkZmO/cz/3c9/eed7jNvsUGX5tisvodj0tm2HQ48J3DQzxf4fK58QZ/rnCR0qCmXQlhSzec+NliXln/GF8/8wcDv+gRwmmJBitIH2FYuK31WLkl1K66BtndEaTyFRJPmIw5VnW5mqZJ23O3j9pz3pPRGrbrMC0Lz1Ndg700rA+Gq1/R5s4hPT9fv16q7KKySXUyZtmC6cUGM/e22FjnUhQWgX5NKVcBkdKvEJc828WKN1W5RLYNvzw6k5tejNEWk5x/uMV/P9RJzIWJeQZbU/Tr6r8q/fr1sWEuXteJK+Hsg21KsgQn7qe+lsfnCb69v80bNR77lxi4UvJ8hUtli+TI+1sAyEAMyum+tqGFWPbQuhx3Rk8N87u0kxp2NV5na8rtqpPK1zCtXyPk8yWlXA+s73HsupSfu4BTR+Janwb8TpPGaAQ7p4ju5rrgth0rFvq7i0jXwe1ooXbV1RjhvOAct70RPA8zIXbxrlbCOeERdRuedOAM/rL8KSaVFVEtPBpbO3G9ON/e3+LF7S73zMrkovVdZJiw7n2HF7a4tMckbTGwEwvhI8aZXHhkiMUvd1Oeq2YvHlJucsK+Ng+9FefcQ23OO9TmntdiXPeVDL67phPLhNdrXLJsmJRv8Fq1S0mW4Jt7W2ys8zhlf5u/bXYJ24KQBdEOydenmPz5Q4cT9rVZt8WEjDBSShraulnznzg5DS+NavAlTIuS2YsAqH92KdJRHjkIcNsboL2BsjN+jpk3JvjCcppqaVx/G+MnJ0en6KG1I4fWsJElVcOkBOl0K6NOzws0TCJxO5rZsXwBbntjoGEfVb+EEBi5pUN6nr5+Oa4HNHNQqaC2nUC/PmhwCZmCF7a4tMUkHXGwDSgJGyx7Pc6cQ2ye+k8cQwgKM+HAMpPZ+1o8/q7DnENtvj7FZN0myU++lsEFTyv9+r96jxP3s3jiPYexOYLJBYIdbXD0VItoh+SCw0Nc8kwXnXFV+3pImcGT78bZr8TkpANC/OEDg6KEYaoFg3K6/7C6kezisUN6b3aGr2Fp+gVJDesQjPvu/cmA2woRj25P07BPu35ph/vdiP4sJvAcMCzKTvsJVtH4lBsktY9cAUIw8fwlxGNK4HasWBi4pcOucRu2MzJoDxXxu2vnAvD5C36GBzz4pkrDxzw4qNTg3ajLc+dk0eVCjg2fXdZO3IV9Skzu+GeML0w0ybQFN34lg5//o5vNjR5fmmRy9lOdPPRWjGxbUJJlkJ8pyAoJxuYIfvt6nP1LTLY2u2xvkXz3iBAlWcp5OS9R6P9fv2sn2xbUd0oyLGVZcfWsiWx8oo3Hf7mQTVX1PLS1hMO+OTrfp6ZpBqaTRmYOiW8fpNNN+bmLEZZNvF4V39evvU0VKZN0BO8vU6DR7E70qWFCgGFSeuqNIGWahtU8tIixc5ewY8VCys++ddj6FbdzqYw0MqF0cGUEaxbPB+Cnv13Lg0//mVerPM49NF2/Xjgvi/Y4OFJy7CMdmIbgK5NN3o54VLd4NHfBXbMy+PmLCf2aaHLOU52seCNGpi0oyhRMyjcIhwTjcgR3vxrjmGkWpVmCZRti1HXA+YfZjMkSaiwacMRYM9CwaIfkgFKLX8/OJr+ohDdblIYNxWZi5Yvvs/+5Q/cGS6UvDQv0y7RBEGhYZNU1QFK/RELPNEl08LUbkWoxAQR77H6hojBtDDsjzUNFei5eZ0uijkiCELhtDbij8J996pdPYcb3ziNseHR1dnBAiUG0U/L9I0JMLVSeNS9sdcmwBNkh1Ub9mXKDylb49axMLn2mi8Yujxwb9ipQrvjrNjlc9cUQx+1j8cf/i9Makzx5WpjfvB6nKCw4YpxJXYfk2i+HWPBcF0h4YmOcZRtidMYhOwR5IcH+YwzuOTbMr17u5q+bHR7+dg7EO5k9zWDF2pd4OwozvvfLXfbe+F9CjdEIVVs2pdwiEKaFMGzM/NIgXe/HVkIYahXpuTgNVWBa4LkIU39UNbs//WnYlrvmkFU+lc7ItkDDQG1HxesrcdsaqHn4R0n9+ogdvpM/9y1+//JjXHriZwc8N9rUxtyfLsdxPCqra/jWNJv1m+Kcc4jNPsVJ/QqZAtsCgeBzE0z2yjd4rdrj3tmZnPZ4B8dOt7jvtRjf3Nti7fsON34tg9MOVCbRMi556MwwqzfGKcoUfGkvi04nzj8rXR44Mcz5f+gkZMCT7zk88EacLqdvDdurwCI/U6Rp2GAd7mNxhwYvi4whdDqmBtGpGmb6nl4JDUur5/MDLaF83FL1C8PEMIburL8noxV9N8ZfacTbGpCeh3TjxCIVICGe+M/utjdhFZRTcsyCIDsihKBm5ZWBkd2uGm76/it/o7mpkXCWasU+aoKF40mmFBppWagvPdBOQaYg0i7pdiRnHWLjSZhWJPjDfxz2KTF5+K04x0y3mPt0JyvejGEZgrgDU4oMnnjX4blNqp7sLxUO3zk8xLg8VZPhejDvMyEauySnPd4BMUFbTFLVKpnxm3
YAYq7k8HtbsO12Sgtz8d7dwNcvuArLDu2S9wWSX0Ibly2ifm2yU7G7pQ4hzITDfQIhlEeb6yATaXoVSINdUE68qQYzrxTHiVOzvYLyiVN32fPWaEYSX8Ok59JRo7q3fQ0DcFrrqXn0x1gF5RQfcymQqAlzHeqeuIGKZZcMSb/GTd2P/1nfNPCJKJPVDyu2km17FIckz37gMvdQm7wMgeMl9euLD7STn6kyUK4neXmby+kHK5sIw1AGqa0xZfz89H8cHn07hisFlqEWlY+/67BqY4xMS/BKlctX9rIQArJtwTHTkhrWFpOcvKqDtu50DZNA3I1z2z9jmKbSsKGMF3rpnS0U7/f5Qb+HkB5Ep2pYvE01+EpJUsOESDZFpGpYin5Z+WUIIbSGpaCDr92YeHc3EvA8N9kd52dIjOQ/3cTzeq8ODcPcpYNNW5sa2PjXJ5hcaDE+y+HDRnil2mVHq8faTW3qOQjIsgRTCw1un5nJPa/GWPeBw0n72eyVL0DCmGyD+47N5Pvrujhpf4tTD7D57esxppcKtjZJrv5SBhev61LpeCHJsg2+e4RFlyM46yCbHz3fTUdcUhwWLDwqg6YuyQn7Wsx5qoPWGPzfpYX87vVufveOwT+WXU1Wps25d/4P+35h189zBHqt3P99y+nYOUVklkwg1t0FAgwrpFrqTQu7OOFtlPBFgkQRcmM1XmcrVY9cSUeJqmvZVUG1RjMS1GyvUP/HSVlQ+AhU5gTIyB/DuB4a1h3dRmFJ6UfSsIyyaWyurmfKuOJ+z4k2tfH7v/yL4rDkis/bzH9G1ac++Z7Dg2+pDFSmBTm2YO8ipV/L34jz29dj5GcoD7A/fehwwj4Wf9/mcv/sTC5c18Wx0y0efSdOabaqb/3hUSGufL5buW0Y8NnxBhuqPW7/ViamUEHXD9Z10dItOXysyWVfyKCy2ePPFXFA8O0DQhRlmYF+DWWr0ed3L3zIFy+aP+T7+aRqmL+gb+tyyCxRWmVYahHra5ivW3bRBIQVCvRLGFaahn3a9UsHX7sxErCLxmNmFQAEtRJOYzUIkfiilmrLsQdePL0zqL96so86Y+vVZ1YxIdTClyfb5NuCZzY5HDjGYObeGcx5Wtk9WIagtVtS0yb51sMd2CacsK/F2BxBXYfkf6tcvjHFwpVweLnBGU904ngqaHM8OLDU5LBykxP2s3g6UdR6+oE2xYkZkE1dQt3vyU5cD4rCgrIcwbzPqLEcjV3w2fubKcoyg66gd+oFM8758ZBf70hiZmZRvXwhjhNPjlZJbC1KJ46wMoisugYzpyi4j9vWSHjMBMLEdmlQrdGMFK7rIgwbq6AMYRhptV6+hoEk1trQS8OEYZGXkfS1HYp+Tf7cLH7/v8tZ9O3+g68H173MGLuLL022OWK8xUn7uziu5AdH2kTaJac+3oHrKf2qblX6VRgWmAJOOcCmLEfw0naXzU0ex++jNOywcoOn/+Ngm2AbgimFgg8avEC/isMGjguz9rHYp1hN+5ASPjvO5NurOyjPURM8irME43INmrrgvte6mVpsD7qrsSc7os2YRZM+0jDtneFrmNfemBwN5blgWHjxGNJz2PHgZUG5hNvWSCi3SGtYCjr42s2RMrG/bmdS++gVuO1NSFd1lgjDQLouZWepll+3JRIUONauvIqLZs/Ai8cQhokUUHbaTzEt9U9umiblE6d+pBlbftYrL9bB7GkhisMWlc0uz3zgcPy+Fiftb/PHCpOi3DANrZ1MzolxSKnJhh0uT77r8LfNLpEOj7AlOGk/G8eDCw4P8e8dHvsWC17c5nLNlzK49eUYVS2qg/HZDxxCphpiu2qjGn7bGpPEXZhebHBQqcGjb8UxhMFXlreTaQnumJnJoj/HcKxsLNPgt8+8xnEX/3TIg2UHoq8vhsZoJK1mZeOyRbhdHXieS8HRFwLgOg71z91N/brbcNsagm4vr7MFM3cMJbMWIgyDzJIJVC9fyIHzFuuZaJpPHFIqk+jaR68AVDejdBMmq64aKG8VjsdpjuCn9iOrrqFeelw487A0/QJlX7Ez/SqfNJXnnu6kvbOb7HBGr9v9rJfZ3c2cQ7OJuS7nH2pxyTNdCCOD8hyPsw4OsXazClgm58T4/HiTY6ZZnLS6gz996PDYO3EkkpCpNMxN0bAJufCvKo/HjwtzyfouFnwuxLMfOHx9qsm/Kl3+tsXl4TfjgX4VhgUlWYKaNo+ybIP36lxsU3DXMZkseLaLLhEmM8MaVFdjT65/7BUOPPWqAc/rqWGN0UgvV/qNyxbRFY2o9/gM5XQfWXsH0TW/RBhG0LEqDBNhmJTMXoRhhQL9Sm2g0Ojga7fCb8/2qY/swMwqwAjnUXrK9Rh2BjtWLKT42Muof2aJGqTdElWilhjNYYTzKDvjJszsAibNu5vtDyyg5NhFRNctDuY8gkrrf1T8rNdXp9iETElDh+S0A23+XOFy8qoOEIIDp43j+bsvY+alS3i/YgvXf9miLFswY7zk3ENsFj7byVETLMpzBTEXphQYzN7H4rkP4hwzzeLQcpPj97VYv8nh1ANtTtjX5sl31diixk5JQaagy4G9Cw0eOCHM1iaPv252WX9WFvP+2MlBpSbTig3OOiybov2/SCyUR8P4r7D3Z78+7H+nnvQsMgZVaJxa6+V2dTBu7h1UrkjWTzhOHLdd1aeogLolUdvnYDjdRNf+Cre9iVBuEWZm1og/b41mpEnVMN9mwgjnYhdPoPysWwDYsXwhmCYy3o3X2UJ07W0q6+s6GOE8ys/8OWZ2ISWz/x/RdYsD/QqVTgmajQbSr0NOuJCfr7qXn839aq/bUrNeJdkGm6IOhoDDy02++kArY3MNIu2SA6eNwzRNNlVs4YYvq5E/5x0a4txDVGnEWzUuR4y3KMlWc2V9DVvxRowzD7LZv8Rg1j4WL1e6nLCvzYNvxMnLUB3YJVlJ/Xro5Ey2N0l+sL6LR08Oc90LXUwvMplcYHD2oRm8647lqVsvHvK/xV83fIA59QsUFA9svzFQsxcoDSs/4yaanr+X+rW34boOMlF3DIDnJH8GomsX43W2av3qBx187Ub0TJ9fNHsGY46/ArtkYjBmw2mJEl1zK15nC6Wn3wSuk0jnK/OvmocWqe6h9ia2/+5S3PbGxADbFuJNNbiGGezVf1Q2vf4S27a288Y2l9tf8vBXqwJJXqbBtCITO0vVAcz83P4cEq5BCMnSV2I8PyeMJ1VR7UNvxVn9bpyuuEq1exIaOyU//ZpNQ6fkyLEmc57u5ME3Y+RnGkwpNLhrVibrNjn8pcLhrINtjt/Xoi0mGZMtOPMgm/s3xGiPw/H7WGTb8KXxHhc+8T8cf+lNHDTj6GG97p1Rs70CN7GaB7UidJrreO1npyBME89z2bbsYoxQJodcrFaAfoBWPPsyPA/sEjXhZseKhZQcuwiroIy6R68IVowaze5OqoZdPXc2bV0OhbMuw8xXAUDNyquUDxQo/fJcrMJxwX1qHlpELLoNt72RmseuQggR6Je/veUaZp+DNlMZu9fevPzqVN6uqOXgqWVpt72w4
X1e3dbFK9s8Fv9vF9KTKKd2GJdn8sYPCrjur+287WTwpUOn87mCehBxlr4a4y/nhnGlMm8+9tEOGrscVr4dJ2xBdkhpWH2n5JhpFu9EPD471mTuHzrJsmFqocGSY5R+/XO7wxkH2hy3r0VLN5Qk9OvxjXEaOuHk/S1MA46ebPDEM1uob24fUr1XV3ece5/fxDcWDK7Wq7k+mtaV7ToO25dfhtNcx79vOR1QtceRP9xKKDuPQ+bfEwRo25ZdHHxPgdKviecvoaOmgqZn79T61Q86+PoEIaVEGAbl5y6m5pHLsYsnEq/frswLnRjCtBGGhZ0YSFt29q04zbVY+WXq/KIJatzDMPn+rQ8HP993+Tk07diM21rPgyeFKQwb/M/mGFf/9UP+b1stf37lPV7/Twe/fU0yOV9wyupOls4Kc/vMTB54I86Db8aYlK+G0AKcdYjqNsqwYMYEk1MPUAO3ncSA25NWdZCXIYi0SaIdqgC2y4GckBK/uAtzD7OZWmSwYYfLY+9KZh04hkjl9mG/7p3hum4wegPUSJTkjEYVnDY8fz9OYzUb7pgXnOd1thBZewfF35pPLLoNkZhcIAyDeEMV8baGtGzop71IVfPJRUqJ7O6g9PSbiK5dHOiRMG2kG0/TLyO7EK+9kdLTb0rTLwCnoaq/AR9pHHHsedxy/xUsX/BNbCtZ8+R7e/kcv2gp23ZEaGlt4+5jM3m3Ls7MaRar1myhq9th4wft3PWPOFMKBN9O6FdRWHDKATaPvR2jPFfpV1scpCc582A1cNsyoDxXcPy+FivfjlPf4XHiYx0UZgqinZItzXEeeDNGa7fSr9yQIO4p/ZpcYNDSLSkKG3z/C/lDrvf60QN/54hzrhp0rZcnvTT9Aog6McpO9+cGq/q8hufvp7Nue6BhwjBV9nL9HRTPWhDol0L20i/QGuajg6/dGMsQ1Dx2ZVBwDyTmafnrvqS1RNBHJEgUshLMRvPP8Y+PJN+/9WH+uvIe9tnxFPsdUALAaVNgOxHO+PnvOWRCMRcWt/Loa1GOGKf8cb69uoOisCDbVhmv5SeGOe+pTjodyTObHNZvcijIEDgSXE854v/6WFX/EDKhuUsSttUqE8DxJGFLFbNWt3rc+lI3N77QjRSCsWPLMVs9cjpG18UeJBgmVn4ZTnOtOhLrovT0mwiVTAqsQ5zmWqJrFwerf6dRzXPOLJkwrI4vjebjJicnl6a6TWka5nW2BEO1+yPNhsU/hlA1rt7g/QszwllM/ca5/PKJP3DVGV/o97w1i+dz2yN/gqp/818HJAdPfz/aDOP35/MH783yp54P9OuU1R3K9NmGvEzBQyeFOePJTj4/3uDlSo9nP3D4n60upoCYq7L504oMqlolnpQ4niDDFHTEJVmWCtI64xLHU/r1q5dj/OplNfsxI2RRWhgakrXE7//xHkz9IkWl4wY+uR+cWDdKw1SIYBdPIF5fGWiYXTRBBcxWiHj9dqVh+WWBfgGYlq31ayfo4Gs35s41/+pVCFnf0YQQfoxlACLolsMwlVnhih8qV/wURCiLmhU/xG1vIJRbTLytge4Ravfd9PpLvB7pYtVb6Vm1nNJy/reigd9vbeBzYw3eqfO4bWYm5z3dSaYFzd1w4n42AvjaFIvnK+JMLjB4v96jNSZpjyln9zMOCrF3kcEJ+1o88lacrJDgwRMzKchUb8TCZ7t47NthirMFZzzeyV+rQvz4N3+gdMLu4CWjAt76Z5bgtEXBdYhFNidvlh5eR1Ni1T+I5bxG8wnB34JMM+zsUn8Ha0QhVDbe1692NdPRCIXx2huDxxIZWexYsRApJV670sDB6Nekg2bw2rZNPL/hQ47+zN79nvfChvepjnTz6NuRtOPjat9nW009R09Vcx5vm5nJ+X9QC0VPCk7cz8YQ8I3ECLOJ+Qb1HaTp12kHhpj3GZtlG2I8+laczrhHhiVYcWKYvYsMBJI5T3ex8uQwj7wT508fONhjprL2tqEXpr++qZonN3bw5fNPHPJ9U+k5T6P2sWvwOltwO5rAdYhHtyZOVMbeXkdT2mJfMzA6+NrN6VkHNv+4o/BaapGei3RiWPmlwerDKijHCOdRPPsyaldepYIy18FprKbom6rDrvaxq8jJtMgp2XvEJsqnbkP2pLWpgYd+fDp5ooqWbo8tTR7HTrewDHiz1mPR520kghP2tfj9e3FeqXIxhKDLkcH8xoVH2cRcOONAm2c+cDh6qsWE4jBtHV0IYGqhYOqdbRiJ4dxSOmTlDm68yEehZ0DcXB/FefRKBBDKGwOA296k/G1aaqlfvwS3tU4FyylO9UIYSOkhXZeaBy9TBw0Tt62R6uULibc1MHFy/18YGs0ngZ51YKGMDKVfroOVp2rBnObapH4dexm4DrWrr+ulX3gOdU/cyMTJg9evI445i4ce/iUZoa186aC9+jyn51akT7SpjdMuX0JWhsHsfZTD/Yn72bwTcYi5gsv/y6Yw0+CCw0M8/R+HN2tdBAJPSiwBkwqS+nX6ATZ/3ezSFfc4Zh+b0hyDpi6JlHBwqcHB97QRsgSuJ6GuYsh1Xq/8p4o7X6zjy+dfg7GThVxf3dnSddly59mBfgXNQEJQv+4O3NY6jOzCNA3z9cvKL0O6rmqk0Po1aHTwtZvS75xHYPzk6TTl5FN5z/mJafGkpeNrE3O1fLM7v0MI+jdfHWkfMJ9Xn1nF0Xt5vPK+wROnZfGfqIuzHR5+K84hZQZv1HiUZgvG5Qq+f2SIe15zePOxm5j3s+V01X7IV/YyMQ2BdFRR/THTLB56K85j79QTMgWdjsQ2lMfOfSfkIYDfvunyyvrHdtk24866G/3i0tduPg27eAKx6Dak5yTrXIonqm1iKYnXqzo0YRgUz1I+R7WrrgXPGfEAWaMZTfrTk+b6KFMnTqU+J5+6R6+guzmirCRS9Cuy6pqgvjVUprLXqV2OhSWlfX4udqZhP77rUe5d8QvK8iLsM2nww7cfXPcyX50IL7wvefAEGyEEcw+zOW5lnG/tbVDXDnXtHrYBpx6obHOerRBMnzIJt2FzL/06dh81imjVO3Fe3CZxPY/WbvXaDym3uOtY1RX4wBvOkOq8Xtq4nfv+1cSX514VDLPuj770K6OHflVt2cSOlVdj5ZUhne6gXKJ6+QLsYlVYH+hXImtfPGuB1q8hoIOv3ZS+PiAAG24+lYpllxCrjwKC0tN/irKLFsEqsure7yDdONW/672a62/AaX/XG64ny6bXX+LFDxs4ZTq0xC3G5Vss+EIGIauD1e/BBX/sxjJUMSeAlILzb17Jixs+wPNcXt7ucPOL3dgmdDlqqyI7ZDC+MIuHv7MPl636gNqWbiKdHsc9LsnMzgEgJz68Gq+dCTn07m50nTjd+HZKfAAAIABJREFULXW8/oszyS8uwXNiVP9uvqpxgbQ6Fyl7JvUhVDoFADO7gIKcsK6T0HyiGUi/wkBDSx1Kw25K0y9IaBj00jDZ1dpvNmVnGiaE4HOnXsx1y3/K+V9sY+YRgytJeGHD+7zzYQsnTIOWuEFLM4Rsk5P3D/HHCoP/
qZbgxMjLVAHPuDybeUfl8su/VWDi8UaNy5J/xigKCxq6JLYhGJdnMb00zFPnK9PZ45dt4z+RGNs7QsxZ738lW4xzBlfn9cJbW1n+eidfPPeKIPAarn41RiNIJ07NQ4tUBsxzg477XiSuGSqdovVrCAwr+BJCFAGrgMnAFuA0KWVjH+e5wNuJX7dJKY8fznU/zRSMKedny9dy9dzZNNTVYuX3bRgqrFDCO8fFbW8IVpcCZWFhCIPxe00JViZNdTW8tbR3sOK0Rof1fL9/68Pcd/k5PFuzjWfXp96SjZXRRn5eTtr5ruvxxuYoc744nvM+W8SZv3mXL06QXHhkiIffcnjywwyk0832pjgnragFciGUixWCgtJJO90CHQo7E/Lm+ihWd3cw4kl6DqDGJXmJ4FYg8LraAIERzlWDZUNhVY/nv9ZEy730vF4TCTSjg9aw0cXXL1A6JF0XK7+sz3OtXOVQ77TUpenX9i0fctHsGViG4M41/wJUsOEbg6ZiZmbhj5MOZ+fyjYtv5Yknf03c2cTsz00f8PmuWTyf4xct5cXaKC+m6ZfFQXurBqPqSFIjq7vhN6+0KsuKRdM57Jcf8rXJggsOt1m2weGPFQYtcYd/buviyLv9+rJM8vIzGVda0u/2Z3/8eUMFK991+cJZl6VlvIarXwDCsrEsCxfAMBGmnaZhvn6Z2UVawz4Cw818/Rj4i5TyFiHEjxO/X9HHeZ1SysOGeS1NP6QWOvrp+7LTbkQYNtJziK5dTPm5i9WUeSEQhoX0HKqe/EnyfsJg3Nw7ej321qVzhv38hhoQ3Xf5OTz3wTbW/V8NriOZe3iYcMjgtMPCvNiaz7m3rCInf9fVdO2MproaXKk6Gf1B5hgmQhgIw2DMKTdgWhbljkPkiRtINSQqPfVGRGIOGlIGdV4y4TEEDLhloBlxtIZ9zPQs1Pbi3eqzdNyPkJ5H3ZpfpOkXqPFDtSuvDO7T1tZK+Rk39bJLqF6+UA1pTGHGty/ij08vY0vNq/xg9hGY5s4bXYYaEB2/aCnVkSiH3VlDS0xyyoEZmKbBWYfZvN2WweO/XPiRZjSm4nkelz/wd9pLD+PzZ5w9pPu6TryXfvn4+jXGiRNZfT05mRbdLcn7BhqWol/l593OjhULVSOY1q9BM9zg6wTgq4mfVwAv0LdwaUaZ+mfvBunitKhVWWoLsEQiTCttlbM74QdrPS0syoDjaqK7tJ5rIKQwMMO5WAXlaYOCncZqNUJl1bVYeSU4LXXqBiGgvYnomlvVr1YGxd+6GGQyq2gXTwhq8kxTVwKMMlrDdkOk5xF5+maEaeG2N6Xplx84yF49eYPnyBPnUfHOq1x8z8Nce/oMxo8pGPhOg8QP1vqyr5hd0/yRZjSm8s93t/Krte9x8PHfZ99pBw35/lIozZGukz7sHIg8fn1K/Z2kuT4KknT9mqnc9n39qnnwhxihMHYoQ+vXEBjuO1UmpdwBIKXcIYTor5IxUwjxGuAAt0gpnx7mdTUDIJ1uxl2wVHWgoPbjfW+pyBM34HW24rkuF82eAYDnulQ+fAUTzvnFx/m00+jXwqJmtD270jHsTGr8obEJ7XLbG7AKysB1GXveHexYvoDiWQsIFU0g1lRL6sy62lXXqTt5DlZBOV5XWzDGQ43h0On7UURr2G5K+bm3YdghKu+e00u/ZHcnIJL65blYa+9g4tzbdv6gKUw96LM0j53M5Y/fxefHG8z71mFkZtgj9vx3Zl/xUYKvqrom7ljzOo35+/H1+b8KFmxDxnWpWfFDpOcmNUwASMZftIKaB9W2YvExl2Jaam5lqn5F190WzOe0C9SWsa9hWr8Gz4DBlxDieaCvwqKrh3CdSVLKaiHEVOCvQoi3pZQf9nO97wHfAzhn0U18+fgzh3CZPYeecx5Tj/tIKam69zu9/KGkG8frbGHHioXBvrzvnm7mjUF2d6o0fmM1WeWq8LSjpoLo2sV0R7fhOk7KY7lcPXd2cO3R7F4ZqfqtkWbMsQtwPRmYpQLUPHQZxccsSJvnCGCFMvDySzHsDLx4N2ZOEYAK0B5chIlHvKuVnBL/Oz+mHaBHmNHUMK1fisHoF6iZppV3p5c2KBsKl5pHfqQ6gz2vl36NnXsHsdoKssaq4vvOyDbq1vyil351t9ThtAqunju7T/3KLx7DV7/3E7a+/w7nLbmXC742jZmfHbgWbDAMdbuyPzq6Yvz2T2/yv1XwmZOvYJ/iMcN6PMMOUX7OL4k31QQaJgyjzwYtALugTM3otEMY4Ty1qF+xEGFn4nW1YVl2ioZp/RosAwZfUsp+B+IJIWqFEGMTK8axQKSv86SU1Ym/K4QQLwCHA30GX1LK+4H7AX7z94qPnlf+hDNQkJOTk6v8pdw4pafemHKLJPL4jZSecj1W0Xic5lrq194WjCLqD7/4cvzk6cHMLoBQ/himzruLjcsW0bjlwyAQS30en6R24sFaavT35WGkjM+Qqdu20v9L4sWVOzUS4jHVIeR3CknXwe1oYseKhTitUfJLx+qW7F3MaGqY1i/FYP4/W4bAs6we+gWRx2+g7LSfJmbWqnmPA+mXT0/9ysgbwyHz7xlQv/ba5yAmLryTNX97mgdv+xOnHTWJ4z+/78daw7StpoEnXtrEy1s7OOCY8/narAOHrV85Obk0RiPJGSmJxWPQgS2lsvlIHHMdB5G4zdewWGQzblsDRjgPmQi6tIYNneFuO64BzgNuSfz9h54nCCEKgQ4pZbcQogT4L+DWYV73U0+ae/QLv067TQiof07VfEnXwetqperX56kPmueAYaniVaArWklmyQSEYeB2NFGx7BIaoxHsRIbGn0bvT7QfPzl9VThcK4rRZiBLjYHE7eq5szFNE9fpRjrx4PagY8hzcRor8dqb1PFethICIQwmnr+ErUvn6Jbsjx+tYR8TwQSPPvQLIVT9UbwLrzNFvwCQxCKbkcgR1S/DMDjsGycjv34Sf/nHM6xe+nemFUgu+OZB7FVetMveh1Qcx+XJf/yHP71ZiRwznckzzmbmCfsGtw/GEmggDZt/3FG4LbXguUg3nnaOdOPgubidrYg+x5cLMEzMnCJKj1tE9/N3ag37iAw3+LoFWC2E+A6wDTgVQAhxJHChlHIesD9wnxDCAwxUvcS7w7yuJkFfq42r586mqa2TseffiXTiweBtf44gQMNf7kfGOtW+v9/tIlU3n+c4xNoaCeUU4nZ18NbSHxBrbVBdMns4A4lbTk4ubc/drlaPmcpCAs/FyMzBLp6AMCyswvGABNNKdjcGSKTnUb18Yb+ea5pRRWvYx0h/+mVaFjLWSfl5tweLHKe5FrtoAlX3zKXhL/fjdXfALtAvIQQHf2kWfGkW0eqt3PC3dbh1bzC9JMTpX9yHiWUFhDN6fq4/OvXN7fzxlQ/49wcRGr0wY4/4FkddfPlHfryBNGz8XlNoe/7OpIaBssIJ5ymLIsMCZLBzkq5hUgVnbY3UPHaNdrAfBsMKvqSU9cA3+jj+GjAv8fPLwMHDuY6mf/pa5TRGI6qJxVW1D9JJKYBMmH16sU7GJoTNb/W
ORTbT9Oyd4MQZc/wVhEsnUfXIlXixDvx9taotmwAwTZPyibvD7MRdz8Zli+jYURkU9wLK08ZtUsIvBG57U7A6r3loEdKJEVl1LT2npKl/C5FwgE6uwnfVhAHNztEa9vHSn36Z65cgPTfQMB8/U+N1dzB2zu1Id9fqV8m4vSg59SKklDRFa1my4W80r9tAyOlgemmY7AyLk78wDdMwyMoMkZ8T7vNxumNxGlo6AIg0tbF+w3Yq69uRmfm0yQz2OvK/OeCLh2DZIxfU+WxctojOyFYk9KFhjQjTVjsk7Y1pOyQ1Dy3C62wlVcOkE6N29bWYQjBh8vQ+53emovWrf3Rf6Cec/kbd7Fh5TdANE491Bx3FwrQQdiZOYzXx+kolbjupa/BiHao4fMVCrMLxhDIyAeiObts1L2g3xO3qwMwtZtK8u9OO+00KZjiPeENlUEwvXQdhhTDzxlB+5s/TupKqly8kJ9PqlarfVRMGNJrdmZ2N6jIsGzuUkaZfhp2hDD3bG9VA7lHSLyEEhWPKKZypGig816Wrs4Pm+lp+9tI/1GuJVpBvOX3ev6kjTs646WqguJ3L9G9cxoGZWdgZu34YtdKvEsrPvjVNi7qj26h+9Eomz1/B5iVnB/oF4LapRi2rcGxaB7zWr5FDB197GDXbK3CdONJz6aipAKD+2aVIpxsQeJ3NwYes4U+/pujo7wcdj13RSmKtDaitsTgdNRVqRRTv7uUHsyezcdki3K6O4PdYawNGOJfKh69AxruC49J18Dpb8LragtlnoFaUTnNN4I2j0WgGh69f3QmfPKVhkvpn70Y6MYRpBt3d9WsXg2EF9hKjqV+GaZKVk0tWTi7le00b0cceLj2nlfj6Vbv6ul76hYSqR65EGAZjz0uabMei27Dyy6l95Eej+tw/Tejgaw/DdV2yyvdW4x4Ni+gzS3Bb69T8NFDbjok6pfr16Y720vMwc5RzvF2kjD/d9iZqH7kcp7We6vu/S2DZ7rl0l439xLUV+11ATXU1yJTORUMYqlaurgYrtyTN7X/7AwtUF1Csk7FzlwTHvXg3TnMtkcQg82T3oy/0gnhDFZ6V/JjF2xrIKdF1EhpNX/j6JQwLpKT+ubsTRfct6RoG2GP2YscDlwb31fql7DR6TivZ/sACQJWajEvRL+nEiDdUEk3Y48geNahCKNuP1Cyh1q+RQwdfeyquS+3qa5Gug5ldkDI/TTnqOQ1VqqZCSjoj6sMlPUelm4WBlJJ4TI35KJ9zu7o9pTOm6t7vKPdj+Nh8wAZioOGyfaXJX//FmXQ31rBt2cXBsWA4dkK4pRNLLqQ9R9VIuA7xyJaULRCJ29FI/ZM/Ib+4JHisiZN1S7ZGMyBSAhKnqYay035CdN1taRrmNFQHhfhD1S8hBJX3XEBjNEJzfTTNfmJ30rCPol8Vyy7Bi8cGqV9KxNyOJmWLk6pfUqqFY0cz3c/fGTyW1q+RQwdfewj+lHrXcZToWDZmVj4g1DghK6QMC6VUoiXAbW+ifv3taXYIVuE44vXbqXlokRrMnXCVBlRheXMkWCHJjGzaulSNg5mZBX0IxWiTKliN0QjlZ6jVcmqBrV+H4L9ndeuW4CXS8a6r5p6NOf4KhGGQWTKBjpoPAUHtqmuJpawC1TxHG2EY2MUTiDdUYaZkuSzTZukf/zkaL1uj+cSjthydhDeexMwuBARW0XiEYal6rxT9chqr8DpbqFuTrEkaSL+EaeE0qFFFRnYh0nV2Kw0btn51NA1Zv6zsQvUdIESafpmmSVFpubaS2EXo4OsTjp+G9r1tYq0NmDmFKhsjRHIHLGGel/gluH/pCaql2XMcJGAXjWfHg5dRMnuRGl676hrlCebP7PJcrMJxmNmFjJ2zOCjg7GuA7cdBauHnW0t/EJgt9lVg67ouGSWTkJ7DuAuWAhCPbqduzS+wSyYSj/qmjgKrcBzCMFJW3yQG/artjnhTDSDTBvsOZXbmYB3BNZo9idT/936Xdpp+AQi1gEz1+fL/MsJ5lJ14xc71y58L6VtSeC5mdgFjz7sD6cR2Kw37OPTL7WgOjklUwNbfNftD69fQ+fi/LTXDwk8BXz13NlPn3cVbS3/AuLl3BPv8kJg0L0QwqgNEImNjBh/ujpoKhGGpbkjDVB9Wy8YI54IQhIongDASKyeBME3iTbXEE0IYa62nvsXjotkzkPEYRWMn9HquuyKl3zM13xiNULVlE6Zpjtg1/FU3QCgjk1h3l/rdtGj4873gOURWXZMY05G4rhAYQwi+dCpf82kk9f/91XNn09bl9NYv00YgEIaRpl/qNouMkklJ/bJCvfUr8ZkMJYZJO021AMTrK5HSwy+m8DXswpmHYoXCaeUCsOfoV9A9alppI9L8odoiJUidtPc+g7qG1q+ho4OvPQwzM4vq5Qtx2xoD4TFCWdSs+KHK0khlkud1tiI9Jxh/43cIxeq24rTVB2l7t71JZc0gxe/Yz6JJ7CIVZBnZhZTMWkhW+VQ233n2qLUd92xx9leLw7XCkE4M6TlsX36Z6hQ1TKTnseWuc9XtiXloAOXn/Aq7eAKxyOagc9RvydZoNIOnL/1CSkRGmB0rFqbpF0i1zZjYppRenFhkM05run4JYSClR89+Rym9QL8gqWGRx2+g9PSfjMo0j49Pv5Q5rZlThDBMJly8Ai8ew22uJVyqFuRbl87RQdUuRH877GEcOE852G9ctojOukrM3JKEUCXxOlsJj5lAd1tz0r3YSKyCpIcQBmOOv4L6P98DnkPt6uuCQEN6HmZ2AUY4DyA4LhBI6anWcAkb7pinjgtBKDsveF49GWlzvlhLHdsfWBDYQGz/3aXB62P2wmBFmZOTy/bHrsHOKcJtayQe3Y5EUv/MnXidLdQ8/COk5wZdVr6AW/nlgKT2kSuQCTm3Csf1MUZIo9EMlV76lTeGHcsvTTsnVb/Kz7pFHUzoFyhNStOvx65KWyxJ6WHlJbbf/B0BKcHz1ELTc6l9+hfUMrB+wchq2ED6BdBcH2X8XlN66RdA9JklO9Uvu6AcKSU1Dy0Cw6TsrF9Q88jlwWJa9gpRNbsKHXztoRw4bzEbly2iK1pJYUlpcLwxGiE8ZgIHzlvMG3fPZ8cK9YF2WqJJcXJi1K25Fa+zmbIzfp6S8hJE1y6m+NgfIkxbrUD9gdGeg0AQKp2CmV1AyfHJ8RiRVdew4Y55uK31vZ5nz5WfX0S67dGruGj2DOXCbJoICcI0MYRBfnFJ/8ImTMaedwdevBu3pS5oDqhdeRX1a28j3tYQdOxcfOyRiTslRUfGuyg9/Wf4YzSiaxdj5ZepuhHTwrBDePFudX6i5b3nfDSNRjM8fP1yuzqId7UGGvZR9EuYNtLzP6OC2tXXUjzzYjXb0Iml+rdjFY3fqX7dPP/MXrrTl4bFurvZuvpaLpx5GCQWfEKCadmD0i9/HJy/qPP1C5TWfhT9EqatthtF0m7Daa4N3oedmdVqRh4dfO0h9FXwGAbG9GgNVrVhi6nZXkHRzIsTc7ygZuWVaSukUMkkJWxCkFU+le7oNsZPnk5rTj6Rx68nI2
...[base64-encoded PNG output data omitted]...AASUVORK5CYII=\n",
      "text/plain": [ "
    " ] @@ -516,19 +474,9 @@ "execution_count": 13, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n", - " FutureWarning)\n", - "/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:459: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n", - " \"this warning.\", FutureWarning)\n" - ] - }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAkMAAADFCAYAAABXT/Z3AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzt3Xl8nGWh9vHfPUsy2ZM2aZa2NGkbWrAtqxWFww6yt6UV6SKKyCYgyvGg4vvCcd9ePXBAOYCg5WVR2lpQRKCCIKBsAqUKLbQk3bI0ezJJJpmZ5z5/zGRIaEpLM5lJeK7v55NPM0+e577vhHLlyrOkxlqLiIiIiFt50r0AERERkXRSGRIRERFXUxkSERERV1MZEhEREVdTGRIRERFXUxkSERERV1MZEhEREVdTGRIRERFXUxkSERERV/N9kJ3v+Os7+nXVaVD9+o0cf3h1upcxrLdCzdyztY+D5x+U7qXIKDj/I5816V5Dsii/3t9Yzpnx6vpNO5WNabavGaYzQ+PAU6++ne4lvK83XtqY7iWIyAiN9ZwZj5SN44fKkIiIiLiaypCIiIi4msqQiIiIuNoHuoF6OAZLgd8h4AVjxt69ltZaQlHoCHuwjL31iYiIjFXGGnLIJ9OTiRmj30Mtlj6nj246sWb/npMYcRkq8DsU5gRwjA/GYBnCWgI2At0h2sPedK9GRERk3Mghn/zsfPBYxmgXAguZTib0QJCO/RpixJfJAl7GbhECMAbH+AioB4mIiHwgmZ7MsV2EILY2j42tdT+NuAwZY8ZuERpgzJi8hCciIjKWGczYLkIDDCO6jKcbqEVERMTVPhRl6OVnn+Sis4/hwjM+zm9/eXO6lyMiIiJJ9MJTL7LixM+x7LgLuPcX9yd9/BHfQP1BXH3BIjo6O3fbXpCfz013r92vMaPRKD//3nV8//bfUlxWzpfOP52jTjiVaTNmjXS5IiIiso8uW/LvtLf37La9sDCb/1n90/0eNxqNcuP1N/PTe35ESVkJl55zBUef8gkqq6eNZLlDpLQMdXR2Un3JLbttf/v2K/d7zE0bXqX8gErKp8a+KMedvoC//+UxlSEREZEUam/v4cDLbtxt+1v/8+URjfvma5uYPK2CigMqADjx7ON59vHnklqGxv1lspZdDZSUTU68Li4tp6WxIY0rEhERkWRpbmxmUsWkxOuS8hKaG1uSOse4L0PW7v4LlvTkmIiIyIfDcN/nk/2E27gvQ8Wl5TQ17Ey8bm6sZ8Kk0jSuSERERJKlpKyEXXW7Eq+b6psonjQxqXOM+zI0a86h1G2toWHHNsLhfp7+00Mcdfwn070sERERSYLZh8xiR+1O6rfXE+4P8+QfnuLoUz6R1DlSegN1QX7+sDdLF+Tn7/eYXp+PL173fb552VKcaJRTF51P5UzdPC0iIpJKhYXZw94sXViYPaJxfT4vX/72VXz1gq/jRB3OOO80qg6sHNGYu82R1NH2Yn8fn9+b+ceexPxjTxqVsUVERGTvRvL4/N4cdcLHOOqEj43a+OP+MpmIiIjISKgMiYiIiKupDImIiIirqQyJiIiIq6kMiYiIiKupDImIiIirfSjK0M/+71f49HFzuHTR8eleioiIiCTRD//jJyw4YgmfO/ULozZHWspQR1sL3/vSCjrbW5My3ikLzuO7t96XlLFERERk/7W3dnDDxdfR0daRlPFOX/JJfrLyB0kZa0/SUoaefPBenLr1PLH2nqSMN/fIj5NXUJSUsURERGT/rVv1CJHtb/H4A48kZbxDPjaPvIK8pIy1JykvQx1tLby6bjU3njuFV9etTtrZIREREUmv9tYOXnpkHTctLuelR9Yl7ezQaEt5GXrywXs5eyZUl2Zx9kySdnZIRERE0mvdqkc4e6ahujTA2TNN0s4OjbaUlqGBs0LLjigAYNkRBTo7JCIi8iEwcFZo+RGxf3x9+RH54+bsUErL0MBZoYm5fiD2p84OiYiIjH8DZ4Um5sb+DfiJub5xc3Yopf9q/YYXn+GZ+hD3v75jyPbCpmdYdOGX9nvcH1x7Oa+/9Dc621tZcdLhrLjiq5x27rKRLldERET20fq/vcJf6kLc/3rdkO0Tml/hU5cu3e9xv3XV93jt+fV0tHWw5KjzufArn+XMT58+0uUOkdIydP2tq0Zl3G/8+NZRGVdERET2zXdX/mRUxr3h5m+OyriDfSh+6aKIiIjI/lIZEhEREVdTGRIRERFXUxkSERERV1MZEhEREVdTGRIRERFXS+mj9aOhqWEnP7nuS7Q178J4PJyxZAULV1yc7mWJiIhIEuyq28X3rvkRrU1teDyGs5eeyZLPn5vUOVJehl5+9knWrrqNxrrtlFZMZdGnLuXIY07c7/E8Xh8Xf/UGqg+eR093kKs+/UkO+/ixTJsxK4mrFhERkb15/qkXWXPfGuq3N1A+tYzFyxZz1PHzRzSm1+fliv9zGQfOqaYn2MPFZ1/Okf92BJXV05K06hSXoZeffZK7fv0tqhZWUFk5j/baDu769bcA9rsQTSwpZWJJKQDZOblMraqmpbFBZUhERCSFnn/qRe647XYqF1RwQNUc2mu6uOO22wFGVIgmTprIxEkTAcjOzWbajANoamhOahlK6T1Da1fdRtXCCibMKMLj9TBhRhFVCytYu+q2pIzfsHM7WzZuYNa8w5MynoiIiOybNfetoXJBBRNmFsS+x88soHJBBWvuW5O0Oeq3N/D2G5s5+NDZSRsTUlyGGuu2U1hZMGRbYWUBjXXbRzx2b0833/3KRVz6tW+Tk5s34vFERERk39Vvb6Cwauj338KqPOq3NyRl/J7uXq6//Ftcdf0XycnLScqYA1JahkorptJe2z
FkW3ttB6UVU0c0biQc5jtfuYgTzjyXY04+c0RjiYiIyAdXPrWM9pquIdvaa7oon1o24rEj4QjXX/afnLzwJI497d9GPN57pbQMLfrUpdQ8WEfrljacqEPrljZqHqxj0acu3e8xrbX81w3XcMD0ahZ/9rIkrlZERET21eJli6l9qI7WzR2x7/GbO6h9qI7FyxaPaFxrLT/62v9j2sxpfPoLS5K02qFSegP1wE3Sa1fdxqa61ymtmMrnP3fDiJ4m+9erL/LEH1ZTWX0QX1xyMgCf+9I3mH/sSUlZs4iIiOzdwE3Sa+5bw6btWymfWsbFl14y4qfJNrz8Tx7/3Z+ZPruKi06PnTy5+NrPc9QJHxvxmgek/NH6I485cUTl573mHP4xHt1Qn7TxREREZP8cdfz8EZef95r30bk8XfvnpI75XvoN1CIiIuJqKkMiIiLiaiMuQ9ZasDYZaxk91sbWKSIiIvvMYmE8fPu08bXupxGXoVAUPDYydguRtXhshFA03QsREREZX/qcPnDM2C5EFnBMbK37acQ3UHeEPdAdIuAFY8xIh0s6ay2haHydIiIiss+66YQeyPRkYhh73+Mhdkaoz+mLrXU/jbgMWQztYS+ERzqSiIiIjCXWWIJ0EBzLZ4aAkfY0nS4RERERV1MZEhEREVdTGRIRERFXUxkSERERV1MZEhEREVdTGRIRERFXUxkSERERV1MZEhEREVdTGRIRERFXUxkSERERV1MZEhEREVdTGRIRERFXUxkSERERV1MZEhEREVdTGRIRERFXUxkSERERV1MZEhEREVdTGRIRERFXUxkSERERV1MZEhEREVdTGRIRERFXUxkSERERV1MZEhEREVfzpXsBe/ODK5cSDHYB0NHSjGMdAKL9IYzPD4CNhDFef+IYGx382gImvj2C8fqGeX/Q/tEwGYEsIo7dbS0+j+G/f/9Csj/FfdLcHuTSH97D7d/4DBMLcva436atjZx29U08fvOXqZ46adjtRXnZibGstUPGHTzP29t2ccY1/82jN11NTV0LF313JStvuJDJxYWJ7YXTc6nf1sjFh1/IVbdcQ3F5MT+56Ptc+6tv0rBtF7+4+r844/JFPLfu7zRt38WkA0o5/oxj+cPPV/PVO7/BzLkzeO5PL/Cr625l7skf5Y3X36S3K4TX5yXaH4nVdQfwGrAWJ2Lx+A1YMB6DCXvJmpBJqDt2TKTPwXgtNmrwZXqIRqIYj6G/J4zH78EJO7sdbwKWaMTBGHAiFuM14IDX7wUsXq8XJ+pgsQRyAhTk5NPYtItoxMHr83Dk0Udwxfcv5bVnX+fRBx6nqa6ZzMxMPD5Db3eIkopiTjvvVA49Zh7AkP2ifQ49oW7CfWECOQFOWXgS5166YMg+7z1exo895ZexDtFIGLw+DAYn0p/IoBHlV/y1NyOw21o+LPlVPXXSkPEGZ9jg9/eUX4tPOIznN9QkPtba0cN5193Gmh9dxgGlExLzbNnRnNgecSzX3LGWd3Y2M3NKCV84+Si+c+cfhox99nGHse6Nt+kMhvD7PIT7o0T7Hf74wJ/Iz8qns7eTaDg6JMOAWNZkePeYX+/NnMEZZh0LxsSybA/5FcgJMHlyBTt31hHqDuGELcYHjmP3mF8lFcUc+JFq3vrX27tl0L7kFzDuMsxYu/s3/T2546/v7PvOSfLNz53F9C/cDMDO2rfJLD4AgJqbljP1qnsA2H7zisT71lp23PIZpl51D9ZxCLfuICN+TN1dV1Lx+Vti7//6aio+d9Nux2+/eQUer5dpV96921q23nIBv3j4xVH8bIe3c+UV5Ad8PLzuac465TiuWX7qHvdddO3P2fbOZg6YPpO1P75i2O3/dkh1YixgyLg/u/fxxOv//+jzmJ5WbPYE6ps7qMiOUtfjpby4ILH9t3d8kSPO/gHV+VEanWwOOKiS/rfeJOPAg9j2Zi1ZkW525WQy+fSJFE7OpH1nH3WPtVHS1YuZMInvPPgjrvrEJfhtP51FAaYvLSerzE/H5l4anmwlb3oWXbW9lB1bRLgrSss/Oig7aSKFs3Lo3NzDtrW7KJ5fwJRTS2jf3MXWBxrxZnuJ9kSZdl4p+VXZdL7Tw9ZVjWQU+YiGnMTxDc+00fx8B1POLKbw4FzaN3XT8EQLudOyCG4PUXZsETmTA3TX99HwZCsVJxTT8noH3dtCVC0to6A6h84tPbxzXz2V5ZV02yCVC8sJtffx1h9rKTuhiMnzKgg3R6h9sJ5PX3geAL/91QNULiyn4ZUmdrzUSOV5pRRW59FV00PNb+qZN+cQdjRup3JhOQWVuXTUBhPHpzpMzv/IZ01KJxxFYym/6n79Zfq7WplyxUpspJ8dt35+2Dzrb962x/wq/+yNGGOG7D9wfNXV9+62lr3l186VV/CfF52RhM96d4NzZaT5tfbHVwwZDxj2/T3lV8Offsphn/l24mPtXT0UeUO0RQMceuDUxDyvvbWdIm+Iul4/JQcWEzmMRIY1PNbGxK5e8gonUt/cgceGY/l1fhlZ5RmJ/PLleMidnk3z8x1MPaeESHeEpudjGZYzJUD3tl4anmqj/MQJeLM9u+VX4cw8Ord088799eRWBsityqL5xdjxOJaGp9soO7aIrNJMepv7d8uvvGnZ9Db1U7uqgeL5BTghS9s/O5l2XikFM3PoqundLb8KKnN55/EdbH16Jwcvn0npwRMTGfTR+R/lpRdfet/8+uSZn2T6R6oSOTdeMkyXycaBnlA/Dz/9EreeW8zDT79ES0f3sPtt2trIho1b+NXCHDZs3MLb23cNu331uue59dxifvfECzz0lxcT4761bVdint8+9jfaWlq4a0EWDY3NZJoIdy3IwmfDtMa3t7W08OvfPY8nGuauBVmY3iD1r2/kjoW51L/+JqY3yKRiP1PPmEjulABVE73kTQkw5bQiJhX7CTc18bvbHiLbhOnP8lH16VLypmcR7bNMmJvLtCWlNP+jk2mLSyk6JI+29V1MW1LGhLm5RHqjBEoyqFpeRrCmF4tD4excqpaW01vXR9XScgpn54LHUHhQbHtwa2jI8cGaXqqWl5FdnkmkJxqfs4yW17qoXFxG0SF5WMcyYW4e05aU0vBcK52be6haXkb+zGyMz1AwK4fpy8p5e9NmKheWUzQjn61P1VG5pIwJc/MJdnRRNCOfyoXlPPrA4zz6wOOJ/bY/10DV0jIKZ+disRTNzqPq/HJefu4fiX08Xs+Q40XGm+b2YFLz64V/1SbGG5xhDz35ImufeOF98yvTRPj+ykcTH2ttbiEc6uGuBVmEQz2sf2Mzv1qYw/o3Nie2d5sI2cdkkTcowyriGdbS3ILPDsqvGdlD8qt7e4jgO7GcySrNoOXldzPMhh2K5sWypenvHcPml8Uhd0YWVcvL6Hy7h9ZX3j1+13PtiWw0HobNr2jYITDJnzi+6eX2ROZgGDa/PF4PzW+0UXV+OabAGZJB6x58Yq/5te7BJ4bk3HjJMJWhcWD9W9s5a6aHWZMyOWumh5UPPzfsfl//+WqWzfExr8zPsjk+rr151W7bF83yUmCCz
JqUSYk/xHEV/Ylxv3bLqsQ8+TbIkoP9HFrmY0KWYflcP4eWeflIiZdFs30cWuZj+Tw/t965jnmTPBwS/9g51R7mlfs4pzr2urnXMqUqgM9Ath+yfFBeGaC517J8no/Hb1/Lirl+evqhcEY2HgM2avFkesidFsAJOeQeEMCb4aGvPUzutACeDA82YsFa8qdn09fSH7uYYAx5M7KwjiVvRhbGxH8gMMS2R+yQ4/uaw+TPyMYSnzNj0JzTAngzPVjAk2nIm5ZFf1sYJ+TEjhl0jiF/RjY2aimozAWgpylE7rQsvJleIuEwAAWVuTTVNdNU15zYL9wbJX9GNnjAxi+fFMzMJRpxEvsMGDheZLy5+49/S1p+LZvj47If3p0Yb3CGHTe5n2J/6H3za8VcPz9e+QjL58U+tmC2j4+UeBPZdla1l3llfs6K59ehZV4CPkM4z2FiltktwxbGj99TfkV7HfpawuRPj2XGkAyz4M30kHtAgL6W/mHzywLYWMZEQ86Q4/vbIrFsHMip4fLLWmzEkj8jO5FfeTOyIXbXAbB7fkEswwpm5ibyC2IZFOoO7TW/Qt2hITk3+PixnGEqQ2NcV3srb22t44LDY9fZLzg8Z9ifrgZ+erp8fuxegcvnB9iwcQuPPf9mYnvYsZw720d7T5g3GkK0Bvs4oypKJOqw9JBsNmzcwlmzA+xo7aOrL8Lig3ysb4gQisAlh/tpDEbpCcO5s/30hh0+f6if/EyYW+qhucehNwKLD/LRHIwd2xuBybkGX2s/DtDdD44F09LPlDzDhYf6KQgYls71kZcJwdqeWKnxGpw+h+DWEJ6Ah+C2ENF+h8xCP8GtIZx+B+MzYAydW3rInJiBIXaJtGtLL8Zj6NrSS+ISsCW23WeGHJ9Z7KdzSw9mYM7+QXNuDRHtczCA02fp2tpLRpEfT8ATO2bQidfOLT0Yr6GjNghAdkmA4NZeon1RfP7YvRwdtUFKKoopqShO7OfP8tK5pQccMCb2v2LH5iBenyexz4CB40XGk4GzQsnIL4BLjsykraWF0w7MpDkYSWRYqD/CGVVRWoN9bNjZM2x+OdZy/hwfBQHDxYf5CfY7LD7IT28ENjVHE/m1a1B+Nfc4HFzioa+xj4IARJx3M6wi1ySOz8kYPr+8WR4yJ/oTmTEkwwxE+xyC20JkTswYNr8MgIlljDfgGXJ8RpEvlo0DOTVcfhmD8cVyciC/urb0xO6ZjGfYe/MLYhnWsTmYyC+IZVAgJ7DX/ArkBIbk3ODjx3KGqQyNcS/96bccOMFQnBu7WbI41zfsT1cDPz2V53oBKM/1xn+KWpnY3tIdparIw6LZPv7joXoWzPJTWeSluT2IifSybI6Ph98I8otnGjj3ID9VRR6uXRdi2Tw/k/M9/Hp9hIWzfcyY4GFHp0NfBFbM9bO51XL3a2EWzPIxc4KHhvYIMyd4WDDLR1WmZesfm/E191HbGoWmPnY80sS5lSZx/GObo1ww01Dzm0a6tnTjyzS0bgiydXUjxUfks3VNI23ruyg6JI+tqxto3RDEl+Ul1NRPzX0N5FYFMHho3xik5v56sioyqbm/nvaNQXAs7W/GtudOCww5Prcqi5p7G+ip78OX7Y3P2cDEQ/OoXdNA2/oujMfQuqGLrasbKTt6Avkzs6m5t4HOzT3YiKVjUzfv3FdP9ayZ1D5YT9uWTqYdX0Ht6gZaN3SSW5BH25ZOah+s57TzTuW0805N7Df16DJq7m+gfWMQg6FtYxc1v6nnyKOPSOzjRJ0hx4uMJwNnhZKRXwA+A8vn+Vn1Sgt3v9yRyLAdu9qoLPKyYJafrz5UN2x+RRz40+YoK+b6CTuwrd1J5NTX/xxK5Fddx7v5tfK1MNcc4qV9XQtvb+ylqctJZNiSSpPYb062M2x+5UwNkDs9i5r7Guht7Gfike9mmPF7aHs9li0lHy8YNr8MHoJbeqm5t4H86mwmHP7u8ZOOLkxko3UYNr+8fg+hXeHE8SVHFiYyB8uw+eVEHYoPLqLmN/XYDs+QDDpl4Ul7za9TFp40JOfGS4aN+Ruo3f402W3XrqBu06tUTMgesr1iUjG//+mVideVC75OuL9vt+N7+h2yM2KdNxp1Etv7opbsjFjAeL3vftzBQ19/mAxv7GsWilgCvtj7/VGb2D7w18YY6AhZsvwM+djATx39UYsF+jO9RBzweSCjL0qGxySO749a/F5Dd9gSyfbhxL/2TtTi8RqsYzHx/aMRi3fQ02DhzgiBoozYKWc9TZZ0uoF6ZMbT02SjcQP1Of9+C3W7dr80sj/5Be9mWNhChs+Hh9hrx7F44hnR3RdNZNbg/IJ3M2xwfr13v/fmV4bX0Be1hDK8OHZohg3er99ht/yyYYsv20ukM4Iv34d17JAMG9jP6/Pgz/TrabJRsK8ZNubLkIzuUx4j9VaometWr2fJFYvSvRQZBSpD7jGWc2a8WnLPE8rGNNPTZCIiIiL7QGVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERczZfuBcj4FvD4CPiyKApMSPdSRERE9o+1dp/fgEs+yP7JftP8ml/zp2/+8f6W7q+f5nf3/GNhDW6f//3ePuhlskv2q3Elj+bX/Jpf9le6v36a393zQ/rX4Pb590j3DImIiIirqQyJiIiIq33QMnT7qKxC82t+zT8e5h/v0v310/zunh/Svwa3z79HJn5Tk4iIiIgr6TKZiIiIuJrKkIiIiLjaPpUhY8xpxphNxpjNxpivj/aihpn/LmPMLmPMP9Mw91RjzF+MMW8aY/5ljLk6DWsIGGNeNMasj6/hW2lYg9cY86ox5uFUzx2fv9YYs8EY85ox5uU0zF9ojFltjNkY/7vw8RTOPSv+eQ+8dRpjvpyq+T8M0plh6cyv+Pxp
zbCxkF/xdaQtw5RfYz+/9nrPkDHGC7wFnALsAF4Cllpr3xj95SXWcCwQBO621s5J1bzxucuBcmvtK8aYPOAfwMIUf/4GyLHWBo0xfuBZ4Gpr7fMpXMM1wJFAvrX2rFTNO2j+WuBIa21zqueOz78SeMZa+0tjTAaQba1tT8M6vMBO4GPW2q2pnn88SneGpTO/4vOnNcPGQn7F15G2DFN+JdYxZvNrX84MzQc2W2vfsdb2A78BFozusoay1v4VaE3lnIPmrrfWvhJ/vwt4E5ic4jVYa20w/tIff0vZne/GmCnAmcAvUzXnWGKMyQeOBe4EsNb2pyNI4k4Ctoy1IBnj0pph6cyv+PxpzbB05xe4O8OUX/tmX8rQZGD7oNc7SHEZGCuMMZXAYcALaZjba4x5DdgFrLPWpnINNwLXAk4K53wvCzxujPmHMSbVv8V0OtAE/Cp+mv2XxpicFK9hwPnA/Wmae7xShsWlK8PSnF+Q/gxTfsWM2fzalzJkhtnmuufxjTG5wBrgy9bazlTPb62NWmsPBaYA840xKTndbow5C9hlrf1HKuZ7H0dbaw8HTgeuiF96SBUfcDhwq7X2MKAbSMe9cxnAOcCqVM89zinDSG+GpSu/YMxkmPJrjOfXvpShHcDUQa+nAHWjs5yxKX6dew1wr7X2d+lcS/z05lPAaSma8mjgnPg1798AJxpj7knR3AnW2rr4n7uAtcQufaTKDmDHoJ9mVxMLl1Q7HXjFWtuYhrnHM2XYGMmwNOQXjIEMU34BYzy/9qUMvQRUG2Oq4s3ufOD3o7ussSN+89+dwJvW2p+laQ0lxpjC+PtZwMnAxlTMba39hrV2irW2kth/+yettStSMfcAY0xO/MZP4qd3TwVS9mSOtbYB2G6MmRXfdBKQshvoB1nKGD3FPMYpw9KYYenML0h/him/EsZ0fvn2toO1NmKMuRJ4DPACd1lr/zXqKxvEGHM/cDxQbIzZAdxgrb0zRdMfDXwG2BC/5g1wnbX2kRTND1AOrIzfie8BHrDWpuUR9zQpBdbGMh0fcJ+19tEUr+Eq4N74N9N3gAtTObkxJpvY01CXpnLeD4N0Z1ia8wvSn2HKL+XXmM8v/XMcIiIi4mr6DdQiIiLiaipDIiKm/DW4AAAALElEQVQi4moqQyIiIuJqKkMiIiLiaipDIiIi4moqQyIiIuJqKkMiIiLiav8LmT/ze1lH2aQAAAAASUVORK5CYII=\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAkMAAADFCAYAAABXT/Z3AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAH4dJREFUeJzt3Xl8lOWh9vHfPUsy2ROSQBLAJAKCCohL0R49rtW6FhCqstS6VLFVq7U91tr31WOP3U/76tHWo7a2WHegaGutSrVatXXfaBUUSBBIAtmTSTLJzDz3+8ckY2KCIJnMJD7X9/PJx8yT57nvm6hXrnmWYKy1iIiIiLiVJ9ULEBEREUkllSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTXfJ9n5zr9t1q+rHiHT3r6JYw+ZlupljCrXbdjOAXP3T/UyXO2cA79sUr2GRFF+fTxlUOIpw1JvTzNMZ4ZGiWfeeD/VSxh13nllfaqXIOIayqDEU4aNHSpDIiIi4moqQyIiIuJqKkMiIiLiap/oBuqhGCx5foeAF4wZffdaWmsJRaE17MEy+tYnIiIyWhlryCKXdE86ZpT+DLVYup1uOmjDmr17TmLYZSjP75CfFcAxPhiFZQhrCdgIdIRoCXtTvRoREZExI4tccjNzwWMZpV0ILKQ76dAJQVr3aohhXyYLeBm9RQjAGBzjI6AeJCIi8omke9JHdxGC2No8NrbWvTTsMmSMGb1FqI8xo/ISnoiIyGhmMKO7CPUxDOsynm6gFhEREVf7VJShV59/mgvPOIrzT/0sD/7qllQvR0RERBLopWdeZtnx57HkmHO595f3J3z8Yd9A/Ulcce4CWtvaBm3Py83l5rvX7NWY0WiUX3z/Wn5wx4MUlZTy9XNO4YjjTqJ8yvThLldERET20CWLvklLS+eg7fn5mfzvqp/t9bjRaJSbrruFn93zY4pLiln+hUs58sR/o2Ja+XCWO0BSy1BrWxvTLr510Pb377hsr8fcsO4NSvepoHRy7JtyzCnz+Mdfn1AZEhERSaKWlk72u+SmQdvf+98rhzXuu29uYGJ5GWX7lAFw/BnH8vyTLyS0DI35y2SNO+soLpkYf100oZTGHXUpXJGIiIgkSsOOBsaXjY+/Li4tpmFHY0LnGPNlyNrBv2BJT46JiIh8Ogz1cz7RT7iN+TJUNKGU+rrt8dcNO2oZN35CClckIiIiiVJcUszOmp3x1/W19RSNL0zoHGO+DE2fOYeaLVXUbfuAcLiHZ//8CEcc+/lUL0tEREQSYMZB09lWvZ3arbWEe8I8/cdnOPLEf0voHEm9gTovN3fIm6XzcnP3ekyvz8fXrv0B371kMU40ykkLzqFiqm6eFhERSab8/Mwhb5bOz88c1rg+n5crv3c53zr3Gpyow6lnnUzlfhXDGnPQHAkdbTf29vH53Zl79AnMPfqEERlbREREdm84j8/vzhHHHc4Rxx0+YuOP+ctkIiIiIsOhMiQiIiKupjIkIiIirqYyJCIiIq6mMiQiIiKupjIkIiIirvapKEM//7/f4OxjZrJ8wbGpXoqIiIgk0I/+46fMO3QR5530lRGbIyVlqLW5ke9/fRltLU0JGe/EeWdx4233JWQsERER2XstTa1cf9G1tDa3JmS8UxZ9np+u+GFCxtqVlJShpx++F6fmLZ5ac09Cxpt12GfJyStIyFgiIiKy99aufIzI1vd48qHHEjLeQYfPJicvJyFj7UrSy1BrcyNvrF3FTWdO4o21qxJ2dkhERERSq6WplVceW8vNC0t55bG1CTs7NNKSXoaefvhezpgK0yZkcMZUEnZ2SERERFJr7crHOGOqYdqEAGdMNQk7OzTSklqG+s4KLTk0D4Alh+bp7JCIiMinQN9ZoaWHxv7y9aWH5o6Zs0NJLUN9Z4UKs/1A7J86OyQiIjL29Z0VKsyO/R3whdm+MXN2KKl/a/26l5/judoQ97+9bcD2/PrnWHD+1/d63B9e/VXefuXvtLU0seyEQ1h26bc4+cwlw12uiIiI7KG3/v46f60Jcf/bNQO2j2t4nS8uX7zX495w+fd588W3aG1uZdER53D+N77MaWefMtzlDpDUMnTdbStHZNzv/OS2ERlXRERE9syNK346IuNef8t3R2Tc/j4Vv3RRREREZG+pDImIiIirqQyJiIiIq6kMiYiIiKupDImIiIirqQyJiIiIqyX10fqRUF+3nZ9e+3WaG3ZiPB5OXbSM+csuSvWyRE
REJAF21uzk+1f9mKb6ZjwewxmLT2PRBWcmdI6kl6FXn3+aNStvZ0fNViaUTWbBF5dz2FHH7/V4Hq+Pi751PdMOmE1nR5DLz/48B3/2aMqnTE/gqkVERGR3XnzmZVbft5rarXWUTi5h4ZKFHHHs3GGN6fV5ufT/XMJ+M6fRGezkojO+ymH/figV08oTtOokl6FXn3+au357A5Xzy6iomE1LdSt3/fYGgL0uRIXFEygsngBAZlY2kyun0bijTmVIREQkiV585mXuvP0OKuaVsU/lTFqq2rnz9jsAhlWICscXUji+EIDM7EzKp+xDfV1DQstQUu8ZWrPydirnlzFuSgEer4dxUwqonF/GmpW3J2T8uu1b2bR+HdNnH5KQ8URERGTPrL5vNRXzyhg3NS/2M35qHhXzylh93+qEzVG7tY7339nIAXNmJGxMSHIZ2lGzlfyKvAHb8ivy2FGzddhjd3V2cOM3LmT5t79HVnbOsMcTERGRPVe7tY78yoE/f/Mrc6jdWpeQ8Ts7urjuqzdw+XVfIysnKyFj9klqGZpQNpmW6tYB21qqW5lQNnlY40bCYf7rGxdy3GlnctTnThvWWCIiIvLJlU4uoaWqfcC2lqp2SieXDHvsSDjCdZf8J5+bfwJHn/zvwx7vo5JahhZ8cTlVD9fQtKkZJ+rQtKmZqodrWPDF5Xs9prWW/3f9Veyz7zQWfvmSBK5WRERE9tTCJQupfqSGpo2tsZ/xG1upfqSGhUsWDmtcay0//vZ/Uz61nLO/sihBqx0oqTdQ990kvWbl7WyoeZsJZZO54Lzrh/U02b/eeJmn/riKimn787VFnwPgvK9/h7lHn5CQNYuIiMju9d0kvfq+1WzYuoXSySVctPziYT9Ntu7Vf/Lk7//CvjMqufCU2MmTi66+gCOOO3zYa+6T9EfrDzvq+GGVn4+aecjhPL6uNmHjiYiIyN454ti5wy4/HzX7M7N4tvovCR3zo/QbqEVERMTVVIZERETE1YZdhqy1YG0i1jJyrI2tU0RERPaYxcJY+PFpe9e6l4ZdhkJR8NjI6C1E1uKxEULRVC9ERERkbOl2usExo7sQWcAxsbXupWHfQN0a9kBHiIAXjDHDHS7hrLWEor3rFBERkT3WQRt0QronHcPo+xkPsTNC3U53bK17adhlyGJoCXshPNyRREREZDSxxhKkleBoPjMEDLen6XSJiIiIuJrKkIiIiLiaypCIiIi4msqQiIiIuJrKkIiIiLiaypCIiIi4msqQiIiIuJrKkIiIiLiaypCIiIi4msqQiIiIuJrKkIiIiLiaypCIiIi4msqQiIiIuJrKkIiIiLiaypCIiIi4msqQiIiIuJrKkIiIiLiaypCIiIi4msqQiIiIuJrKkIiIiLiaypCIiIi4msqQiIiIuJrKkIiIiLiaL9UL2J0fXraYYLAdgNbGBhzrABDtCWF8fgBsJIzx+uPH2Gj/1xYwvdsjGK9viM/77R8NkxbIIOLYQWvxeQz/84eXEv1H3CMNLUGW/+ge7vjOlyjMy9rlfhu27ODkK27myVuuZNrk8UNuL8jJjI9lrR0wbv953v9gJ6de9T88fvMVVNU0cuGNK1hx/flMLMqPb597QAWP/+Mdzrr2dlb/+BL2mTAuPs+mbQ2cde3tXHP+qfzu2VfZvL2BqZOK+crnjuC/fv3H+PEP/uU1LrxxBWccczBr33mftmAIv89DqD3MH+59FBzAa8BanIjF4zdgwXgMJuwlY1w6oY4QXp+XSLeD8Vps1OBL9xCNRDEeQ09nGI/fgxN2Bh1vApZoxMEYcCIW4zXggNfvBSxerxcn6mCxBLIC5GXlsqN+J9GIg9fn4bAjD+XSHyznzeff5vGHnqS+poH09HQ8PkNXR4jisiJOPusk5hw1G2DAftFuh85QB+HuMIGsACfOP4Ezl88bsM9Hj5exY1f5ZaxDNBIGrw+DwYn0xDNoWPnV+9qbFhi0lk9Lfk2bPH7AeP0zrP/nu8qvhccdzIvrquJfa2rt/Nj8Wv3jS4g4lqvuXDNkhvWN/dH8CvdEifY4/OmhP5ObkUtbVxvRcHRAhgGxrEnz7jK/Ppo5/TPMOhaMiWXZLvIrkBVg4sQytm+vIdQRwglbjA8cx+4yv4rLitjvwGm896/3B2XQnuQXMOYyzFg7+If+rtz5t817vnOCfPe809n3K7cAsL36fdKL9gGg6ualTL78HgC23rIs/rm1lm23fonJl9+DdRzCTdtI6z2m5q7LKLvg1tjnv72CsvNuHnT81luW4fF6Kb/s7kFr2XLrufzy0ZdH5M+5fcWl/OeFp+7y6z+/90keXfssp594DFctPWmX+y24+hd8sHkj++w7lTU/uXTI7f9+0LT4WMCAcfvP87vHX8R0NmEzx1Hb0EpZZpSaTi+lRXnx7W/87joq519DgTdEczTAnP0mx+d5872teKNd1GelU3ZKIfkT02nZ3k3dE80UtneRk1/IG7+7jpJTvonHhmkrCLDvOSVklKbRurGL6gfrKD48j/bqLkqOLiDcHqXxtVZKTigkf3oWbRs7+WDNTorm5jHppGJaNraz5aEdeDO9RDujlJ81gdzKTNo2d7Jl5Q7SCnxEQ078+Lrnmml4sZVJpxWRf0A2LRs6qHuqkezyDIJbQ5QcXUDWxAAdtd3UPd1E2XFFNL7dSscHISoXl5A3LYu2TZ1svq+WitIKOmyQivmlhFq6ee9P1ZQcV8DE2WWEGyJUP1zL2eefBcCDv3mIivml1L1ez7ZXdlBx1gTyp+XQXtVJ1QO1zJ55ENt2bKVifil5Fdm0Vgfjxyc7TM458MsmqROOoNGUXzW/vZKe9iYmXboCG+lh220XDJlnPQ0f7DK/Sr98E8aYAfv3HV95xb2D1rK7/NpdBg1HIvNrzU8uHTAeMOTnu8qvuj//jIO/9L3411raO3eZXwXeEDVdfor3KyJyMENmWG1D65D5Vfd0E74sD9n7ZtLwYiuTv1BMpCNC/YuxDMuaFKDjgy7qnmmm9PhxeDM9g/Irf2oObZs62Hx/LdkVAbIrM2h4OXY8jqXu2WZKji4gY0I6XQ09g/IrpzyTrvoeqlfWUTQ3Dydkaf5nG+VnTSBvahbtVV2D8iuvIpvNT25jy7PbOWDpVCYcUBjPoM/M/QyvvPzKx+bX50/7PPseWBnPubGSYbpMNgY0tAR59NlXuO3MIh599hUaWzuG3G/Dlh2sW7+J38zPYt36Tby/deeQ21etfZHbzizi90+9xCN/fTk+7nsf7IzP8+ATf6e5sZG75mVQt6OBdBPhrnkZ+GyYpt7tzY2N3PTAX3F6urhrXgbhUCdvvbOR38zP4q13NhIOdTK+yM+kUwvJnhSgstBLzqQAZScXML7IT3NjIzfe9TjpJkJPho/KsyeQMyWTaLdl3Kxs8g7IouG1NsoXTqDgoBya32qnfFEJ42ZlE+mKEihOo3JpCcGqLiwO+TOyqVxcSldNN5WLS8mfkQ0eQ/7+se3BLaEBxweruqhcWkJmaTqRzijjZmVTvqiExjfbqVhYQsFBOVjHMm5WDuWLJlD3Q
hNtGzupXFpC7tRMjM+QNz2LfZeU8v6GjVTML6VgSi5bnqmhYlEJ42blEmxtp2BKLhXzS3n8oSd5/KEn4/ttfaGOysUl5M/IxmIpmJFD5TmlvPrCa/F9PF7PgONFxppE59dL/6qOj9c/wx55+mXWPPXSx+ZXuonwgxWPx7/W1NBIONS5y/y6a14GHSZC5lEZ5AyRYY0NjfhseMj8Kl80gY6tIYKbYzmTMSGNxlc/zDAbdiiYHcuW+n+0DplfFofsKRlULi2h7f1Oml7/8PidL7TEs9F4GDK/omGHwHh//Pj6V1vimYNhyPzyeD00vNNM5TmlmDxnQAatffip3ebX2oefGpBzYyXDVIbGgLv/9HdOn+ph+vh0Tp/qYcWjLwy53zW/WMWSmT5ml/hZMtPH1besHLR9wXQveSbI9PHpFPtDHFPWEx/327eujM+Ta4MsOsDPnBIf4zIMS2f5mVPi5cBiLwtm+JhT4mPpbD833Pkwy/p97fRpXmaX+Dl9Wux1Q5dlUmUAn4FMP2T4oLQiQEOXZelsP/99z2Msm+Wnswfyp2TiMWCjFk+6h7Q8H07IIXufAN40D90tYbLLA3jSPNiIBWvJ3TeT7sae2MUEY8iZkoF1LDlTMjCm9w2BIbY9Ygcc390QJndKJpbeOdM8ZJcHYnOWB/Cme7CAJ92QU55BT3MYJ+TEjul3jiF3SiY2asmryAagsz5EdnkG3nQvkXAYgLyKbOprGqivaYjvF+6KkjslEzxgey+f5E3NJhpx4vv06TteZKxJZH4tmenjkh/dHR+vf4YdM7GHIn/oY/Nr2Sw/P1nxGEtnx742b4aPA4u9u8yvOSVeAj5DOMehMMMMyrD5vccPlV/Z5QGiXQ7djWFy941lxoAMs+BN95C9T4Duxp4h88sC2FjGREPOgON7miOxbOzLqaHyy1psxJI7JTOeXzlTMiF21wEwOL8glmF5U7Pj+QWxDAp1hHabX6GO0ICc63/8aM4wlaFRru9d1bmHxK6zn3tI1pDvrvrePX11buxega/ODbBu/SaeePHd+PawYzlzho+WzjDv1IVoCnZzamWUSNRh8UGZrFu/idNnBNjW1E17d4SF+/t4qy5CKAIXH+JnRzBKZxjOnOGnK+xwwRw/uemweJaP+k6Hrggs3N/HzmDs2K4ITMw2+Jp6cICOHnAsmMYeJuUYLpjjJy9gWDzLR046BKs7Y6XGa3C6HXpaI3gCHoIfhIj2OKTn+wluCeH0OBifAWNo29RJemEahtgl0vZNXRiPoX1TF/FLwJbYdp8ZcHx6kZ+2TZ2Yvjl7HIJbQrE5t4SIdjsYwOm2tG/pIq3AjyfgiR3T78Rr26ZOjNfQWh0EILM4QHBLF9HuKD5/7F6O1uogxWVFFJcVxffzZ3hp29QJDhgT+1+xdWMQr88T36dP3/EiY0ki8wvg4sPSaW5s5OT90mkIRuIZFuqJcGpllKZgN+u2dw6ZX461nDPTR17AcNHBfoI9Dgv399MVgQ0N0SHzq6HT4YBiD907uskLQMT5MMPKsk38+Ky0wfkV3BLCm+EhvdAfz4wBGWYg2u0Q/CBEemHakPllAEwsY7wBz4Dj0wp8sWzsy6mh8ssYjC+Wk3351b6pM3bPZG+GfTS/IJZhrRuD8fyCWAYFsgK7za9AVmBAzvU/fjRnmMrQKNf3rqooO3azZFG2b8h3V33vnkqzvQCUZnt730WtiG9v7IhSWeBhwQwf//FILfOm+6ko8NLQEsREulgy08ej7wT55XN1nLm/n8oCD1evDbFktp+JuR5++1aE+TN8TBnnYVubQ3cEls3y88TGKHe/GWbedB9Tx3moaY0wdZyHedN9VKZbtvypAV9DN9VNUajvZttj9ZxZYQj1O/7cqYaqB3bQvqkDX7qhaV2Q1nc6KDo0ly2rd9D8VjsFB+WwZVUdTeuC+DK8hOp7qLqvjuzKAAYPLeuDVN1fS0ZZOlX319KyPgiOpeXd2Pbs8sCA47MrM6i6t47O2m58mV6a1gXZsqqOwjk5VK+uo/mtdozH0LSunS2rdlBy5Dhyp2ZSdW8dbRs7sRFL64YONt9Xy7TpU6l+uJbmTW2UH1tG9ao6mta1kZ2XQ/OmNqofruXks07i5LNOiu83+cgSqu6vo2V9EIOheX07VQ/UctiRh8b3caLOgONFxpJE5heAz8DS2X5Wvt7I3a+2xjNs285mKgq8zJvu51uP1AyZXxEH/rwxyrJZfsIOfNDixHPqmr+EhsyvFW+GueogLy1rG3l/fRf17U48wxZVmPh+MzOdQfm1ZdUOsiYHyN43g6r76uja0UPhYR9mmPF7aH47li3Fn80bMr8MHoKbuqi6t47caZmMO+TD48cfmR/PRuswZH55/R5CO8Px44sPy49nDpYh88uJOhQdUEDVA7XYVs+ADDpx/gm7za8T558wIOfGSoaN+huo3fI02a5uXvzCN2+lZufgU4tl44v4w88ui7+umHcN4Z7uQft19jhkpsU6bzTqxLd3Ry2ZabGA8Xo//LqDh+6eMGne2PcsFLEEfLHPe6I2vr3vPxtjYtuBAV/re9fRE7VYoCfdS8QBnwfSuqOkecyA4/1eQ0fYEsn04fR+7yNdUXwZXqxjMb37RyMWb7+nwcJtEQIFabFTznqaLOF0A/XwjKWnyUbiBupE5hd8mGFhC2k+Hx5irx3H4unNiI7uaDyz+ucXfJhh/fPro/t9NL/SvIbuqCWU5sWxAzOs/349DgPyy4labNjiy/QSaYvgy/VhHTsgw/r28/o8+NP9eppsBOxpho36MuQWI/kkx1i16J6nWHTpglQvw9VUhtxDGZR4yrDU09NkIiIiIntAZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERERcTWVIREREXE1lSERERFxNZUhERETczVq7
xx/AxZ9k/0R/aH7Nr/lTN/9Y/0j190/zu3v+0bAGt8//cR+f9MzQxXvVuBJH82t+zS97K9XfP83v7vkh9Wtw+/y7pMtkIiIi4moqQyIiIuJqn7QM3TEiq9D8ml/zj4X5x7pUf/80v7vnh9Svwe3z75LpvalJRERExJV0mUxERERcTWVIREREXG2PypAx5mRjzAZjzEZjzDUjvagh5r/LGLPTGPPPFMw92RjzV2PMu8aYfxljrkjBGgLGmJeNMW/1ruGGFKzBa4x5wxjzaLLn7p2/2hizzhjzpjHm1RTMn2+MWWWMWd/738Jnkzj39N4/d99HmzHmymTN/2mQygxLZX71zp/SDBsN+dW7jpRlmPJr9OfXbu8ZMsZ4gfeAE4FtwCvAYmvtOyO/vPgajgaCwN3W2pnJmrd37lKg1Fr7ujEmB3gNmJ/kP78Bsqy1QWOMH3geuMJa+2IS13AVcBiQa609PVnz9pu/GjjMWtuQ7Ll7518BPGet/ZUxJg3ItNa2pGAdXmA7cLi1dkuy5x+LUp1hqcyv3vlTmmGjIb9615GyDFN+xdcxavNrT84MzQU2Wms3W2t7gAeAeSO7rIGstX8DmpI5Z7+5a621r/d+3g68C0xM8hqstTbY+9Lf+5G0O9+NMZOA04BfJWvO0cQYkwscDfwawFrbk4og6XUCsGm0Bckol9IMS2V+9c6f0gxLdX6BuzNM+bVn9qQMTQS29nu9jSSXgdHCGFMBHAy8lIK5vcaYN4GdwFprbTLXcBNwNeAkcc6PssCTxpjXjDHJ/i2m+wL1wG96T7P/yhiTleQ19DkHuD9Fc49VyrBeqcqwFOcXpD7DlF8xoza/9qQMmSG2ue55fGNMNrAauNJa25bs+a21UWvtHGASMNcYk5TT7caY04Gd1trXkjHfxzjSWnsIcApwae+lh2TxAYcAt1lrDwY6gFTcO5cGfAFYmey5xzhlGKnNsFTlF4yaDFN+jfL82pMytA2Y3O/1JKBmZJYzOvVe514N3Gut/X0q19J7evMZ4OQkTXkk8IXea94PAMcbY+5J0txx1tqa3n/uBNYQu/SRLNuAbf3eza4iFi7JdgrwurV2RwrmHsuUYaMkw1KQXzAKMkz5BYzy/NqTMvQKMM0YU9nb7M4B/jCyyxo9em/++zXwrrX25ylaQ7ExJr/38wzgc8D6ZMxtrf2OtXaStbaC2L/7p621y5Ixdx9jTFbvjZ/0nt49CUjakznW2jpgqzFmeu+mE4Ck3UDfz2JG6SnmUU4ZlsIMS2V+QeozTPkVN6rzy7e7Hay1EWPMZcATgBe4y1r7rxFfWT/GmPuBY4EiY8w24Hpr7a+TNP2RwJeAdb3XvAGutdY+lqT5AUqBFb134nuAh6y1KXnEPUUmAGtimY4PuM9a+3iS13A5cG/vD9PNwPnJnNwYk0nsaajlyZz30yDVGZbi/ILUZ5jyS/k16vNLfx2HiIiIuJp+A7WIiIi4msqQiIiIuJrKkIiIiLiaypCIiIi4msqQiIiIuJrKkIiIiLiaypCIiIi42v8HBhz3XIpb9tUAAAAASUVORK5CYII=\n", "text/plain": [ "
    " ] @@ -555,7 +503,9 @@ "y = iris.target\n", "\n", "# Initializing and fitting classifiers\n", - "clf1 = LogisticRegression(random_state=1)\n", + "clf1 = LogisticRegression(random_state=1,\n", + " solver='lbfgs',\n", + " multi_class='multinomial')\n", "clf2 = GaussianNB()\n", "clf1.fit(X, y)\n", "clf2.fit(X, y)\n", @@ -580,14 +530,6 @@ "execution_count": 14, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n", - " \"avoid this warning.\", FutureWarning)\n" - ] - }, { "data": { "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYQAAAEjCAYAAAAypHaFAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzt3Xd4FOX2wPHvSYeEhBJC770oVUG6oiAIInbsFUXRa1f0er3XdvUqlp8ogh1FRbGAICoWiqJ0AaVK7xAgIYEkpLy/P3aCS9hNdpPdnS3n8zz7sNmdnTk7CXPm7WKMQSmllIqyOwCllFLBQROCUkopQBOCUkopiyYEpZRSgCYEpZRSFk0ISimlAE0ISimlLJoQlPIDETEi0tyH+3tXRJ6s6PFE5DoR+dlXcanwoglB+YSI9BKRBSKSKSIHReQXETlNRM4QkSMiUsXFZ5aLyGgRaWxd0JaVeD9VRI6JyJaAfRGlIpgmBFVhIpIMzABeAaoD9YD/AHnGmF+BHcBFJT7THmgLfOT0cqL1erErgM1+DF0p5UQTgvKFlgDGmI+MMYXGmBxjzHfGmJXW++8B15T4zDXATGPMAafX3geuLbHNpNIOLCI9RGSxVTJZLCI9nN6bIyJPWKWVLBH5TkRS3eynn4jsEJEHRGSfiOwWkQtEZLCIrLdKPQ87bX+6iPwqIhnWtuNEJM7NvnuJyHYROdP6ubWIzLb2uU5ELi3tOzpJtT6XJSJzRaSRm+OliMgkEdkvIltF5J8iEnXiJvKKdc7Wikh/pzeuE5FN1jE2i8iVHsamwoExRh/6qNADSAYO4LjwDwKqlXi/AZAPNLR+jsJRarjA+rkxYKx/twPRQBtgHXA2sMXNcasDh4CrgRhghPVzDev9OcBGHAmrkvXzM2721Q8oAP4FxAI3A/uBD4EqQDsgF2hqbd8F6G4dtzGwBrjLaX8GaA4MtL7T6dbridbP11uf7QykA+3KOMfvAllAHyAeeBn4ueTxrOeTgGlW3I2B9cCN1nvXWd/zbut7XgZkWucyETgMtLK2rVNWXPoIr4eWEFSFGWMOA71wXJTeAPaLyHQRqWW9vx2YC1xlfaQ/kADMLLGrHfydBK6ljNIBcB6wwRjzvjGmwBjzEbAWGOq0zTvGmPXGmBzgE6BjKfvLB54yxuQDHwOpwMvGmCxjzJ/An8Cp1ndaaoz5zTruFmAC0LfE/i4BJgKDjTGLrNeG4Ehw71ifXQZ8BlxcxncFR4lqnjEmD3gEOENEGjhvICLROC7yY6y4twBjcSTNYvuAl4wx+caYKTjO+XnWe0VAexGpZIzZbX1vFSE0ISifMMasMcZcZ4ypD7QH6gIvOW3iXG10NfChdeEtaRKOu9gRwAdlHLYusLXEa1txtGEU2+P0/CiQVMr+DhhjCq3nOda/e53ezyn+vIi0FJEZIrJHRA4DT+NIIM7uAj4xxqxyeq0R0M2qasoQkQzgSqB2KXEV2178xBiTDRzEcQ6cpQJxnHheSp6TncYYU+L9usaYIziSya3AbhGZKSKtPYhLhQlNCMrnjDFrcVRxODcQfw7Us+rRL8T93f9nOO5WNxljSl7sS9qF4wLrrCGw09uYy2E8jtJIC2NMMvAwICW2uQS4QETucnptOzDXGFPV6ZFkjBnlwTGPlwZEJAlHNc+uEtuk4yjpOJ+XkueknohIifd3ARhjvjXGnIOjumgtjhKfihCaEFSFWY2k94pIfevnBjju8H8r3sa6+5wKvANsNcYscbUva7uzgJs8OPTXQEsRuUJEYkTkMhw9l2ZU6At5pgqO+vZs6y7a1QV9F47qsTtF5DbrtRlWzFeLSKz1OE1E2nhwzMFWA3Uc8ASw0KqOO84q4XwCPCUiVayG53s4sbSVZsUUKyKX4Giv+VpEaonI+SKSCOQB2UAhKmJoQlC+kAV0AxaKyBEcieAP4N4S272H48611LYBY8wSY8zGsg5qHD2UhljHOQA8AAwxxqR7/Q28dx+ObrFZOO6ip7iJcRuOpPCgiNxkjMkCBgCX40gYe4BncTQUl+VD4DEcVUVdcFQ1uXIHcATYBPxsfe5tp/cXAi1wlCaeAi62zmUUjnO5yzpGX+A2VMSQE6sSlVJKRSotISillAI0ISgVNETkTxHJdvHQwWEqIDQhqKAiIltEJKfEBbFk10pv99lPRHb4KkYPj3mXNeL3sIjsEpEXRSSmtM8YY9pZPY5KPia7OUaciEy1zpkRkX5lxDRHRHKdzuu6CnxFFYY0IahgNLTEBbFk18qAKutC7sZXQGerS2p7oANwp08Dc/gZx4C/PWVtaBntdF5b+SEeFcI0IaiQISLdxTGjaoaIrHC+IxaR60VkjTUHzyYRucV6PRGYBdR1LnFIiemkS5YirLvuB0VkJXDE6tZaV0Q+s+YI2iwibi/wxpiNxpiM4t3hGAHss+mwrWMcM8a8ZIz5Ge0eqnxAE4IKCSJSD8dUF0/iGJB1H/CZiNS0NtmHowtqMo55gl4Ukc7WuIZBwK5ylDhG4BgkVxXHBf0rYAWOUb/9gbtEZGApMV9hjWJOx1FCmFDKthmlPB7yMF5P/FdE0sUx4V8/H+5XhQFNCCoYfel0MfzSeu0q4GtjzNfGmCJjzGxgCTAYwBgz07orN8aYucB3QO8KxvF/xpjt1jxIpwE1jTGPW3fmm3CMP7jc3YeNMR9aVUYtgdc5cRqMkttWLeXxT
AW/R7EHgaY4EtpE4CsRaeajfaswoAlBBaMLnC6GF1ivNQIuKTEHUC8cUywgIoNE5DdxTCmdgSNRuJzq2gvOo4Ab4ah2cj7+w0CtsnZijNmAY2K81yoYT4UYYxZaE97lGWPeA37BSqhKgWP6XaVCwXbgfWPMzSXfEJF4HHMgXQNMM8bkWyWL4vl6XI2+PAJUdvrZ1eRyzp/bDmw2xrQoT/A4/q+5vRsXkexSPvu0Mebpch63NIaT519SEUxLCCpUfAAMFZGBIhItIglWQ3B9HLN7xuNYv6BARAbhmB6i2F6ghoikOL32O465gaqLSG0cM5OWZhFw2GpormTF0F5ETnO1sYjcJCJp1vO2wBjgB3c7d9PdtPjhNhmISLyIJFg/xlnn5aSLvIhUtc5dgtVAfiWOtRW+LeN7qwiiCUGFBGsSt2E4qmn247hjvx+IsuYHuhPHpG6HcMwxNN3ps2txLNW5yaruqYtjdbYVwBYc7Q0u5yJy2kchjnUWOuJY1jMdeBNIcfORnsAqa26nr63Hw262rYh1OKblrofj4p6DNdOpiDwsIrOs7WJxNMjvt2K/A0fVnI5FUMfpXEZKKaUALSEopZSyaEJQSikFaEJQSill0YSglFIKsHkcgojcjWOpRAOsAq43xuS62/6ntfu0BVwppbxQtXIsnRpW82i8iW0JwZqb5k6grTEmR0Q+wTENwLvuPvPXvtLG7iillCqpdkoCnRpW82hbu6uMYoBK1vTClXGs5aqUUsoGtiUEY8xO4HlgG7AbyDTGfFdyOxEZKSJLRGTJvOkfBTpMpZSKGLYlBBGphmPkaROgLpAoIleV3M4YM9EY09UY07XP+SMCHaZSSkUMOxuVz8YxWdh+ABH5HOiBY84ajwmGlNgiEqLBxRQutjPGkFsImflRGJ1HTCkVxOxMCNuA7iJSGcf8K/1xzG/vlZTYIqomJlAkMRCECQFjSDAFcCSXjPxou6NRSim37GxDWAhMBZbh6HIahWPRDq8kRBO8yQBAhCKJIUFzgVIqyNk6DsEY8xjwWEX2ISLBmwyKiQRldZZSSjmzu9upUkqpIKEJwUeW/PwjNw7txfWDz2DKm6/YHY5SSnlNE4IPFBYW8upTD/Pka5OZOG0uc2Z9ydaNuu6IUiq0RNSayv+4ZjiZhw+f9HpKcjIvT/qi3Ptdt2o5dRo2pk6DRgD0HTSMX3/6lkbNWpV7n0opFWgRlRAyDx+mxchxJ72+YeLoCu33wL491Kxd7/jPqbXqsG7l8grtUymlAk2rjHzA1TKk2qtIKRVqNCH4QGqtOuzfs/P4z+l7d1M9rZaNESmllPc0IfhAq/Yd2bV1M3t2bCM//xhzZ02je7+BdoellFJeiag2BH+Jjonhtoef5pFbR1BUWMiA4ZfTuLk2KCulQktEJYSU5GSXDcgpyckV3vfpffpzep/+Fd6PUkrZJaISQkW6liqlVLjTNgSllFKAJgSllFIWTQhKKaUATQhKKaUsmhCUUkoBNicEEakqIlNFZK2IrBGRM+yMp7xeePRuLuvbnluG97M7FKWUKje7u52+DHxjjLlYROKAyjbHUy7nDLuUoSOu5/lH7rQ7FKVs99/RI8jOzjrp9aSkKowZ95ENESlP2ZYQRCQZ6ANcB2CMOQYcC8SxMw8dYNxj/+COx/+P5KrVK7y/U7qewZ6d230QmVKhLzs7i6Y3nbxI1KY377AhGuUNO6uMmgL7gXdEZLmIvCkiiSU3EpGRIrJERJbMm+6bu4sfv5xM0a4V/PDFBz7Zn1JKhQM7E0IM0BkYb4zpBBwBHiq5kTFmojGmqzGma5/zR1T4oJmHDrB89lReurA+y2dP5XDGwQrvUymlwoGdCWEHsMMYs9D6eSqOBOFXP345maHNoUWtSgxtjpYSlFLKYltCMMbsAbaLSPG0oP2B1f48ZnHp4IouKQBc0SVFSwlKKWWxu5fRHcBkq4fRJuB6fx6suHRQIykWcPxbXEoYfn35ewj994FRrFy8gMMZB7mqf2euuv0+zr3wCl+FHbG0t0poSkqq4rIBOSmpig3RKG/YmhCMMb8DXQN1vFWL5jN/dy4frdxxwutV98+vUEIY87/xFQ1NuaC9VUKTJuvQZXcJIaD+Nf5Tu0NQSqmgFVEJIRwYY1j+1dsUZe466b2iwiJST+lD0y5n2hCZUirUhXxCMMaAMSBidyjuGeOI00uHD6aTcySLdbM/IC02B4CioiJGdGtEn/auO2RNmPUra9+fjTGGg1KV5mdeSuWkZKpUrVGhr6CUCn8hnxByCyHBFFBETHAmBWOIMgXkFnr+kYN7drDu56+olrWBUxqnMuK8hjSrl+rRZ28Z1OH48zVb97Fw/Ucs27ifozXaEp+YTLt+FxAbF+/tt1BKRYCQTwiZ+VFwJJeEaJAgTAjGGHILrTjLkHs0m6VTXyG54ABPDO9EvZoVW6O5TaM02jRK49qzDTv3Z3DwcCb/mziGKg3b0WnoDUF5vpxpbxWlAkvKU5VhlzfmbQqdYL2QeWA/f3z9NlUL0nnk4i7UrObfC97Pf2zj/fkbiW3cjRbdB1K5SrJfj6eUsk/tlASGdqjr0d2fJgSbLf96EmmHV3NZ79a0aVQzoMf+dulGvvh1I7XOvI76rToF9NhKqcDQhBACtqxYwJbfZjK8Q3Uu7tXatjiMMfzz/Z/ZnluZU4bdStUaabbFopTyPW8SQsi3IYSiv5b8RPKWH5h8ew+7Q0FEeOqa3hzNPcbtE56myZA7SKvXiOgY/dOIZDpKPDLp//oAOrhnBxt++5aWbOWBy4NrcbjKCXGMv7Ufb3z7Pj99dYTeNz1OfEIlu8NSNtFR4pFJE0KA7N26jl3fvc6DQ06lRYNudofjUkJ8LHecfxoXp2dy78QxVGl0Cl3OD/7eSEop37B1TeVIsXvTGvbOnsArI8+iRYPgr6Ovk5rCh/ecww1tC5j/7tMUFRXZHZJSKgA0IfjZzvWrODT3LV4eeSbR0aF1uru1qc9dfdOY/+5TmhSUigChdYUKMdvXLiN7wXuMvbEvUVGheaq7tKzL/f3r8P0r97F6zhd2h6OU8iNtQ/CTXRtWcWzJxzx3Y7+Qr4Pv0KwOn95bh89+Xsvs6W/R5fwb7Q5J+ZmOEo9MOg7BT356+ylevbwl1ZIr2x2KT3312wambU2g20W32h2KUsoD3oxDsL0eQ0SiRWS5iMywOxZfWTLtLS5qmxB2yQBgaPcWXNQ0j98+HWd3KEopH7M9IQD/ANbYHYSvLPp8AufVPcxFNo4+9rdBpzVnRGv49eOXyjWtt1IqONnahiAi9YHzgKeAe+yMxRdWfvcxFzbJZdBpLe0Oxe/O7tSEmOitvDN5LD2vvDfk20mU8kS4j+C2u1H5JeABIORbqvLz8tjz568Mun+A3aEETL9TGxEbvZ0J7z9Lr6sf1KSgwl64j+C2rcpIRIYA+4wxS8vYbqSILBGRJfOmB2cGLiwoYM7ER3jx2tPsDiXgerZrwOge1ZmnA9iUCnl2lhB6AueLyGAgAUgWkQ+MMVc5b2SMmQhMhODtZbR320Z6N0ui
flo1u0Oxxemt63FPdBRj33mSPtf/M2THXCgVTOyonrItIRhjxgBjAESkH3BfyWQQKv6aNZG3b+9rdxi26tyiDg9FR/H0m/+h343/Iio62u6QlAppdlRP2d2GEPI2LJpN71Y1iI3RC+ApTWtx6Sn7WfDHUlp2ON3ucCok3BsPlXIlKBKCMWYOMMfmMLxmjGHnktn8787ILh04O6dLCz5/fTK1GzQluXqq3eGUW7g3HqryCfcR3EGREELVn3OncW3PenaHEVSSKsfz+qi+3DbhCdpf/jBVU2vZHZJSPhPupUNNCOWUl5vD3pVz6X17H7tDCTqJleJ5fVQ/bnv9aVpf8iDV0+raHVJY0+ot5SuaEMppy+rlXHpaXSrFx9kdSlCqFB/H+FvP5Jbxz9D1hqdJqJzk0/3rRfBvWr0VnuyontKEUE575n/IsLsG2h1GUEuIj2X46Y2Yt2webXoN9um+9SJYuj3bN3EofR+PXDfkhNcjMWGGKjt+T5oQymH13Gmc17VxyC14Y4cLerTmr88Wsu63aFp1D50EGuqNh4WFhcQmVT8paWrCVKXRhOCloqIiMtb+wogIH3fgjfsuOp2Xv/yVNT8X0KbXeXaH4xG9i1aRSG9xvbT0q3e4/ZwWdocRcv5xwWk0z17G6nnT7Q5FKeWGlhC8cDQ7i+wtK+h6UehUfQST24Z0ZuKsFfzxYz7tz7rI7nDChqvqrUPp+4hOrsnOLRtOev2/o0doCUi5pAnBC1v+WMzoAVo6qIiRgzrwzuyVLPnmQ1r3GUZC5cRy7SfU6/h9ydXF/ZHrhhB/9p3EpzY84fXYpOoue2cpBZoQvJK5cwPV6offKmiBdv05p1Jv6UY+e28MNXtfSaP23bzeh7d3uJHWTTUpqQrbP/4nsUnVT3g9OqEycMyeoFTQ04Tgodyj2VTPWk+bxmfZHUpYGNClGed0bspjk79kc2ERTTqc4dfjRVo31THjPuKR64ZE1HdWFaeNyh5a9tk4ruvfzu4wwoqI8J8re5K67TvmvXY/e7b+RVFhod1hKRWxtITggezMQ6QUHKB9k1PsDiXsiAj3DO/KsfwCXv3qfX6clkX9HhfQqktvXYEtSERadVsk04TggeWfj2PcFd7XcyvPxcXGcPeFp5OZncPclYuY9Mo0ElNSadRtIA1ad7Y7vJDkq4b3SKtui2SaEMqwZ9Ma6scfITmxkt2hRISUpEqc36MN5/dogzGGZ6Z+ycrFX5KZH8Npl99LbFw80TH+/bPNyjjIx8/dz4gHnicpJXRXwdO7d+UtTQhlWDt/Oi9f1MnuMCKSiDDmEkfJbMe+Q0yY9gQHD+dwrG5nkus2R0Ro0aGbR6uzeXO3vHjWFGL2rmLR1x9z1ohRFf8iSoUITQil2LdjC6cmHSa1qm9n6lTeq59WjSeu7g3Ayr92kpm9lMyjebzzf5+QWKM2BqHdOVdQo04Dl5/39G45K+Mg6+Z9wavD63H7jC84ffDlIV1KUMobtiUEEWkATAJqA0XARGPMy3bF48qab97liWGt7A5DlXBq878XJRrSoy0Ax/ILePj9ceyQBIqKDLENOpRrNPTiWVMY2gKap1ViaIsjWkpQEcXOEkIBcK8xZpmIVAGWishsY8xqG2M67vDBdBpWyqFJnRp2h6I8EBcbw/M3/L1Y0RcL1vPb+w+wNT2Hen1HEB0TR5Wq1andsKnbfRSXDh67LAWAEZ1TuGKKlhJ0VHjkEGOM3TEAICLTgHHGmNnutnlj3qaABTtv4iO8fn0XEivFB+qQyg9y8o7x88rNGAOL/trP6uwk4isn0fG866lcJfmEbX/8aDwtd3/B6N5/rwU9bn466+sM11KCClm1UxIY2qGuR324g6INQUQaA52AhS7eGwmMBLjq3ifpc/4Iv8ezffUS2qRGazIIA5Xi4zjnNEe134DTW5FfUEhG1lH++fHj5CfWpttldx9vlN6w/BeW78tlysodJ+wjac8vmhBURLC9hCAiScBc4CljzOelbRuoEsI348Yw9R89dWBUmFuzdR9vfb+GQ9m5VGrbn6adelGlasWrCHUglwomIVNCEJFY4DNgclnJIFA2/rGE81pX1mQQAdo0SuP5G9MA+Gn5RiZ/9CMmrQ0dB11VoTWgdSCXClW2zWUkjivuW8AaY8wLdsVRUk52Jm3qR24DYqQ6s1MzJt5+Nv/ul8SCNx/lqE4RrSKQnSWEnsDVwCoR+d167WFjzNd2BVSQf4z9v31Jpzv62xWCslFUVBQNalXj1ZvO4L9Tn2BPQRVaD7iatPqN7Q7NFlr1FXlsSwjGmJ+BoKqX2bxiAYM61SMhPtbuUJQPpGdkc8szHzBxzNXUSPF8IZ7qyYk8d0NfNu5M54tf32Tp8qZ0GXqDHyMNTlr1FXl0+msnB5bM4Or+OqNpuJg0cwGH9mznvRm/lOvzzeqlct/FZzCoViaLv5jg4+iUCj5B0e00GKz4ZjIXd3M97YEKPekZ2cyYu5jxF6YyasZirh3S06tSgrNhPVqSsHQTn024h6odB9Gi2zmlbq8DuVSo0oQA5OflcXDDYgYNKf0/ugodk2YuYEjzKFqlxTOkeS7vzfiFe64cUO79DezSlIFdmvLiF7+wZkEBbXoMcrut1q+rUOW2ykhEThGR30Rku4hMFJFqTu8tCkx4gbFqzhc8OKSt3WEoHykuHVzT2VEiuKZzIjPmLuZA5pEK7/vu4afRLHMJ8958lN2b1lR4f0oFk9JKCOOBfwO/ATcBP4vI+caYjUDYtLoeycokd8MC2g082+5QAqa8ja2horh0kJrk+PNOTYphSPOoCpcSit0+tDOjioq45803KSq4jnotQ6vdydPeQ1r1FXlKSwhJxphvrOfPi8hS4BsRuRoIjgmQfODwgf2cfUpdYmLKnlM/XDg3tvriAhls5ixbz659eXy4at8Jr9fdu95n3zcqKooXburHA+9MYnvRiJBa1c3T3kNa9RV5SksIIiIpxphMAGPMTyJyEY6RxdUDEl0AbPltJkO7Rc6Mpr5sbPW38pZkpo8d7ceoHIpjm/DQVbz01afMXzCDU86/haqptfx+bKX8pbRup88CbZxfMMasBPoDQTHNhC9Uzd1JpxZ17Q4jYE5sbI0qd5dMb6RnZHPRQ697XYdf0W6j/lQc26SZC3jy6l5MuPYUVk95moP7dtkdmlLl5raEYIz50M3r24Cb/RZRAKXv3kZypcipKiouHXxyqaMO+JrOiVz6if9LCeWpogrmkoy72Mbfeia3T3iWFsPvc7tym7d0tLAKpIgemLb2qwk8fGl3u8MImNIaW/3F+eLpTU8fO0oynnIXW0J8LONHncXWr17k5w+eI/doxXs1Fdf3l3y4ShJKVVTEjkPYtHw+XeonEBcbOacgEI2tJZVnPIBdJRlPlBVbXGwM42/rz75DWfzjrUc544b/UCkxuHrlaO8h5U7kXA1L2PXHrzx1bUe7wwioQDS2Oivvhd3f3UYrwtPY0qpVYdwN3Rn91qPU7X4BLbv0Pr4Qj920qkm5U2aVkYi0FJEfROQP6+dTReSf/g/Nf/Jyczi
6f0dYrXlQ3oZbfyrt4llavHOWrefDVXl0fXXf8ceHq/KYs2y9y+ME8rt7E1uNlEQm3tKbs6OXMvet/1BYUOD3+JSqCE9KCG8A9wMTwNHTSEQ+BJ70Z2D+tGnVYm7r3ySsEsKkmQs4uHsb59zxIrNfudv2qhUovYoKcNvQ7G1JJpDjKryNrUpiAoO6taZR7b289M7DRNduQ+eh1/spOqUqxpOEUNkYs6jExTOkb3Wy9m6jWtvKdofhM8VVM0/0r8x1nx7gtak/8eiNQ+wOy+3FMz0jm0sfeNknPYjSM7L5/IeFJEohX/ywMCjaGVxp26gWE0fVYsbCDXwy+QWa9RxK3cYtyvycv+r7tfeScsWThJAuIs2wRieLyMXAbr9G5UfH8nKJ27mYjheEzwjdSTMXMKgp1IzL46Yucbz3zQJuu/jMoLwwgm8nnps0cwE1Y3PJPJJPamLFJ7HztyHdWtCq7j4+nv82P36fQLsBV1KrYXO32/vr4qxrHShXPOl2ejuO6qLWIrITuAu41RcHF5FzRWSdiPwlIg/5Yp9lWT5tItf0bxeIQwVEcelgcDNISRBu6pRAvMnjtak/2R2aS+4mnlu/bZ/X7QDFpYOD2XmMH5LIwew8vvhhYVC1o7jSokEaj17Riw9u7syeb19l9+a1doekFFBGQhCRKKCrMeZsoCbQ2hjTyxiztaIHFpFo4FVgENAWGCEifp1yNPfoERKyttGtdT1/HiagiksHMYW51KgcRWpiFFeeEsuH3ywIyguju4bmB8d96vWo5OLSwbBWsbRKjWZYq1hSY3ODasxCaaKjo3h55FlUWvEhP7z8DzasWExhQQFZGQd545Ebyc48ZHeIKsKUWmVkjCkSkdHAJ8YYX19dTgf+MsZsAhCRj4FhwGofH+e4JVNf4blLu/pr97aYs2w9azdn8caCYyQn/N3OE48EZfWJq4bmgsIico4eZPoNdb1qU/h24Ro27Mrhvm4JrN6fT7f6wrjFOeQsXBN039ud6Ogo7r+4G1lHcvl+2Y98MO5D0rPzidm7ikVff8xZI0bZHaKKIJ60IcwWkfuAKcDxpGCMOVjBY9cDtjv9vAPoVsF9urV7yzqqFaaTVi20piouy/Sxozn/3nHs2pd+wusxOC6+wXZhdNXQ/MLk72DnUq/bFAZ2a8PA+kfp2Tbl+Guj0zOhXptSPhWcqiQmMLx3e85o24ABo1+gf7N45s77gtMHX05SSrWyd+CBrIyDfPzc/Yx44Hmf7E+FH08SQvHq4rc7vWaAphU8tqtufJaSAAAgAElEQVQ+nydNqy0iI4GRAFfd+yR9zh/h9YF2b1rD/h/f4KWbz/T6s6Eg0APOfKkio5LtGHntbx9+s5DrOiXQuV4cS7btZepLDzN89OOk1KhZ4X0vnjXleMlDRysrV8QYe5Y2EJEzgH8bYwZaP48BMMb8191n3pi3yetgd65fRcbP7/LCTf2IioroqZu85mr6aV8vrlNcOrinz993+S/My4R6XUL2ol5exd1xP7m0CqlJMaRnFzDg3YN06HIaTc67gzpNy1/yyco4yAdjLufVIYncPuMIVz8zxWclDxXcaqckMLRDXY8GXXkyUvkaV4+Kh8lioIWINBGROOByYLoP9nvc9rXLyP51UsQkA1+P2HU1/bSvp6T2dlRyOHPV4H7VqfG0qxlD7m+TmP/mY2RllK+mdvGsKQxtAc3TKjG0BSz6+mNfhq7ChCdVRqc5PU/AsR7CMmBSRQ5sjCmwGqy/BaKBt40xf1Zkn8WOZmex5Y+FJPw1m+du6BtWI5JL48sRu66meDbG+HxK6tIGr1300Os+K4mEwrKh7qvANjB97GiyjuRy2xuP0/GKf5JcPdXj/WZlHGTdvC947DJHKWxE5xSumOLb9gkVHspMCMaYEyoaRSQFeN8XBzfGfA187Yt9FTu4bxdrPnmWi7s34rxre0dMMvDV+gHFF86OLeufNHgM8NmAsrL4ejqKUFg2tKy2oCqJCbx+Sx9um/AE7S9/2OPV2YpLBzUSHUuh10iMPV5K0F5Myll56lGOAmWPuQ+wvNwcfpn0X7Z/9QKvjzqTId1bRUwyAN+tHzBp5gL27tzKZ9/9esLgsWk/LuKLHxaeNKDMH2MdyruGQqD2Z6fESvG8Pqoff37s+epsG5b/wpSVufR+dcfxx5SVuWxYHhrjNVTglFlCEJGv+Lv3TxSOQWSf+jMobxzat5vV331AtYL9PDO8E7VrnGp3SAHnaU+d4rv//952IWNe+/z4v8XVKMX76d0ojmN5OVSt5LhfSE2KoW+9Y6zaW0hqUo3jr/lrSmpfTm3hj/3ZrVJ8HK+P8nx1tlv+90GAIlOhrsxeRiLS1+nHAmCrMWaHX6Nyo7iX0eGD6Wxc+C1ZB3bTPDadi3u2ol3jNDtCCgqe9tR5YfJ3zJg9l5S0umTu23X83yHn9OWeKwcc38+c9Zms3XcMYuJITkwAYN+hLPILoV7qid0S66al+rTbq6ueNpd+ksWnz91V7iowX+4vmBzLL+C213+k6fl3U7NeY7vDUUHKm15GnjQqDzbGPOj8gog8W/K1QMjPy2Pxp/9H3NE9PDjsVBIr1aNW9dAbhORrnvTHL777/79hNbjgnY28e3kdrvt4Ix9cWZeHf1jMkN4dj5cy7umTYtuF09eL4zjvL7+giIyMQwxqGhvypQTAsTrbqP7c+Mab1Lw5ZGejV0HEkxLCMmNM5xKvrTTGBLxu5o7rLjYPDDuVBrW0Z4S3iu/+r2gnvPbLQaJjYiksyKdqchWIimF+ejK9Uw/bPh5g4J0v8/v6bdSpnkhM9N9NXOUtiTiP4j58JJecnFwqVUqgdZP6IT2gz9krXy1jc5XOtOl1nk/3q1NkhweflBBEZBRwG9BURFY6vVUFsKU16pWRfcveSJ2kuHQw+aIkMg+lc+tpCVw6JYsJwxK54+vDvH5JXV6csJmtVRP5cFXeCZ8N9Kjfgd3akH94H0PO6eXyuCXbQcrqRlp80U/PyKbbtf/h1SFJ3DkrlxW7cmh61QsApFaJZ9H4293uI9jdMbQzT330M7u3tqZOo2Y+269OkR15Sutl9CEwFMdgsaFOjy7GmKsCEJvykeJqEynIISVBqFkJzm8Vw7fr8xnSIoYZq7O5u28NrhrckyXv//v445tX7iM2NqbMXjm+GhDnSW+g4u6j3s6OOmnmAoa0iKJNy8ZcclotqrXrTbubx9Lu5rGkZ+WVvYMgd1nvVqz/cYrdYagQ5zYhGGMyjTFbjDEjrOmuc3D0NkoSkYYBi1BVWPFo4NPH76fLhCwavpjJC78e46n5uYz9NZfn5ma4HB3s6ahkX41eLqvrrHM7yKq1G3l8QFUmTJ3Nhu373OzxxM9d1CYOgIs7VOHo2nkcO3pydUioal6/JvVjDpFzJHy+kwo8T7qdDgVeAOoC+4BGwBogfFaZCXPlqSv3dKCbLwfEldV19ngDcdwxrmgfww9rMxjSDB545VO++J/7Kp/iz1WzutFWT4xhWAth9tLvaND7Iq9jLY9AjJS+b3hX7pr8LH1HagOzKh9PBq
Y9CXQH1htjmuCYukJHtIQ5Twe6lbadN1VJpfUuKt7XjLmLGdGhMplZR7j1tARmrz/KdZ3iWLV2Y6mlhOIS0oD3s+g/cQ/9J+5hyh95HNq8yptTUiG+ngPKldSqSTROLtIV2FS5edLtNN8Yc0BEokQkyhjzk4g86/fIlG28GehW2nbeTBdRVtfZku0gNRIc7SCz/yrgivYxpZYSiktITa96gXY3jy33eSkvX5WiPPH4FWdw/TufUafJIxXel06RHXk8SQgZIpIEzAcmi8g+HAPUVJjydCxAadtdc14PZsxdzNPnVuPi92cztE9HWjRwP3jw7UevK7VKpThhvDQvi8KCIgqLioiOgiIDdarEsD9nMwcyj5R6oU2tEs+fb9zr8nV/CuRI6aioKHIyD1JUVFThGX61a2nk8WQcQiKOBuUo4EogBZhsjDng//BKWPCKPYs3RBhXK7DByWMBStuuX+eWsHMpFBXw6fJD1G7U7KQ7eOd69fdm/MKM2XOPj5oujT/WUChvHf/po1512UupuCurHSOlZy1cz3d57WjbI7QH3inf8OlIZWPMERFpBLQwxrwnIpVxTFetwpSnjdDu7uqLL4Ljz6vMqKm7eGNYIhd87Kjndy4lFFcpvfrpT8xZtMLjKhV/rJRW3tlQ07PyXFZDFZdEfD3y2hO9T2nE+6/PokWXvsTG+7f0o8KLJ72MbsaxhGV1oBmOtZBfx9G4rCKYu4to8UVw5ppshrSIoX1azEn1/M716he8v4AbuiR5XKXi6xHG/qzjt2OZz6TK8fRqWZ3szINUS6vjl2Oo8ORJG8LtwOnAQgBjzAYRidyZ5BRQ+kV0zrL1bNudw+GsbN4fnsDq/fkMbB7DlOlbjtfzFyeNGpWjiTd5DG7297Tanq6p7Cv+rOO3a3qM5nVS+HLZHKqd6/0a5CpyedLqlGeMOVb8g4jE8Pd02CpCldbddPrY0VxzXi9G965Jz7b1adukLj3b1ueWHtV5b8Yvx5PJNZ0TmbQkkytPiSWmMJeCwqKTupv6m3Ms4N91HgJpQJdmFG1bQlFRkd2hqBDiSQlhrog8DFQSkXNwzG/0VUUOKiLP4ZgG4xiwEbjeGJNRkX2qwPGkW+rsRWtYvfEAH6zIJSrq7/asunsdo6GL69W/XZvN77tyeX1xHtFx+cen2667dz3XnNfD74O57KjjD5RRA9swbuo4Tr/0TrtDUSHCk4TwEHAjsAq4BceSl29W8LizgTHWusrPAmOAgE+nrcrHk4voOae3IS9zH0PO6XnShdXRO8lRr34wC+olR5FREHfSDKQvTP7Oo4Zeb3oIldy2onX8dnVl9USHZnUws34gO/OQrp2sPOK226mINDTGbPN7ACLDgYuNMVeWubF2Ow0KZXVLPd7LaEhlRs046raLZWnbeboP+HvhH0+7rHq6bTjYc+AwY745QI/LwmOqb+U9b7qdltaG8GXxExH5rMJRuXcDMMvdmyIyUkSWiMiSidN0xoxAKW3aieljR58wK2rxo/ju3hfTXni6j/SMbKb9uIiHekQz7adFpdb9h9Payp6qVb0KKdmbPF5/WUW20hKCc0Zp6u2OReR7EfnDxWOY0zaP4Bj1PNndfowxE40xXY0xXUcO6+ltGKqcyjv3jqeNtKVt501D76SZC+hb7xiNqhTSt+6xUuP1NMmEExHhmjNbs/7nGXaHokJAaQnBuHnuEWPM2caY9i4e0wBE5FpgCHClKWu4tAqo8t5Jp2dkM+COlzizIW4nqStWWjtEWRPdOR9v2o+LGNykkEbVYhjcpNBtKSFcexN5onPLelQ+sJrCAp1xRpWutITQQUQOi0gWcKr1/LCIZInI4YocVETOxdGIfL4x5mhF9qV8z5vqGudqpUkzFxCVe4h3lhym66v7jj9crbVQPAOp83YfrMjllU9/YvaiNSe95269hr71jtG4WjQJMVE0rhZ9QinBOT53SebVT3864Tv4arGfYHP7eR2Z++a/KSjItzsUFcTc9jIyxvhzeopxQDwwW0QAfjPG3OrH4yk3Sva68XSmUzixWql4MrvJV9cvsxEYXA/YKm7wPeOUZqz4a0eZvYZmL1rDHxuymLE6iqgoKCqC/UeLaJ+55vgMqcXxuetNVFC0lOqxx473ZCrvFBbBrnXDmjx+ATzz+et0v1SXwFSuedLt1OeMMc3tOK46WckLoKf98kuOVN6fcYQu1bJpViOlXP34S05lUbsyZe7jnNPbcE69oy4muWtzUnyuEtTfPZmSGTVjMUN6dwzYNNV2aFG/Jib9dw7t30O1mrXtDkcFoYrNj6tCmqu2AldVOe6qa4qrlfo1gE+/+4WlO3JZv+twuernS05l8UT/ymXuY86y9XywIpdGT26i8yt7T4jVk2qvkts8OO7TsG90fvWWvqz86CnycnPsDkUFIU0IYSA9I5sh945j6L2vlusi7HwBdNel9O1Hrztet16ygbaoMJ8GSYUY4P0lh6haKcqrC6qrqSxqxuUxqKmUuo/pY0dz1eCeNEuN56rBPU+ItawG5JLfYUSHyqxau5EhrRPcfiYcVIqP44Kujdi8fL7doaggpAkhDEyauYCNm7aSvmtruS7CUPYF0LlqqWS10ryN2Ww+VMTd3eP4aNUxGjz+Fx+syD2pVFFa/EOaO/4UZ/x5mOs7xpOSIAxuZkqNyV1vKE96KZXcRgpyuKJ9DDNWZ7v9TLi4uE8bUnfP56/FP9gdigoytrQhKN9Jz8jm8x8WUqOS4ZHesTzz0yKXS12WnNrBmzl8StbHx1dKYv9BRwNtUZFhx/58rmwfS+vUaC5oHcuXa/MZ3v80Hr1hiEffobjBd9yCDIY1h31HCwGIi8lnSPN4t20J7mYpddeAXHPnGn5ZtYmJY64+aZt9h7IoLCyiiAw+/LPw+Gf8OU21ncZc2p2xn89j3W8FtOo+0O5wVJAoc8W0oKJTV5zkhcnfMX/eXHrXK+KKU2N5bRlUbdXrhIuYq+kaPFkVrTiRdGxRn8QDq7inT8pJK5M9/uYMPv/mJ76/OoljhYVsPljAyBm5SGxl5r75iFeNsp6u1FYcmycrkZV3VbZgUN5V3LzxyvQlbE7uSpte5/ll/8p+Pl0xTQWv4tJBdF4e13RIJKWSMLhJPg86lRLcrVvgyTz9k2Yu4ODubXy4cRvzb3H0SinZDfXzn5ZyZuMo9h4ppLCwkFNrRTOwWQzL9ni2roDzRc+btQO8Wfe5PKuylSUQF+tAdIG94/yujJ+xnNVzC2jbd1jZH1BhTdsQQtikmQuoGZvLsFaxpCZGERslJw3OKu90DcWJ5In+lYk3eVjjRUhNiqFfAzhn9IscyDxCw9o1mL83kQs+zWf4lBz6vXeUGesLOJAjHjXKlneKDE96Qzknw4++XcCgpuKzHkTljdtTgZx3adSQTrQr+JM/fvTnlGUqFGgJIYTNWbaexdtyWbStiLG/5h5/PTo6io5H1h8fLObJILOSihNJrUp5nNk4itNe2UH1KpUAOJiVQ/XYguO9ktIzsjn71v8y+ZJY2qXFkn6kiEunHuXMhqWPJShZehnSu
yNjXvuciWOuxhhT6h24pyUcf6zK5s8lN0vG7o9V3Fy5aWAH3p29iqXffcypAy7323FUcNMSQgibPnY0u2eNZeesF094bJsxluljR3s8J1BJzj2QWjRI45FzG9A4LZlvx93PN6/cR+O0ZCZfXf/4neukmQs4p1EhIrB6fz77jhbSoRa8s+RwqT2NXI0DcO7JVJE7cH+uyubvSfLsmnfpunNOoXul7fw+6wO/HkcFLy0h+FEg6plLU97FX8pKJCXvXB3HSeCnE2ZYjqF9s5Mbg4uVnCKjR4NYnvpuPdNvqM+D3y8kOkqYWIE7cOfvMOevI+w6nM8bS49BzI4TVmXz9q7bm6k9ysvOVdyuPLMtcfPW8OOMd+k85Dq/HksFH00IfmT3vDjlXeC9tG6beTnZJ10MS1sA56KHXneZEJ0veunZBVz+wU4apwjfr82gZiycUiuaVmnVXVaXeJJoT/wOCRCfQHK86x5L3gjExbqiq7hV1CV92hD9yzqmffgiTc8YTN0mrfx+TBUctNupn3iz4leoeGHyd7BzqYu5g7q4vFCVtjqZcxfTfRlHqBGbz1lNY1iwvRCRKF4YmEDXlnXIyCk6qTupnaueedM1NtRt3LGfyXPXkd2kPy1OP9vucFQ5edPtVBOCnzhfPEu7aIaS8owT8GQZzZ7X/4c3Bkfz7znH6Fw3mo0HDeOHVobYytSukXzC+QvHRBvsxn6+iF01z6BltwFERWmzY6jRcQg2C0Q9sx3KM06grF4y4z+bw7lNDJ3rRDOgWTQ7DhuW7c6n0/jDxMYeIa2ao/dUcXVJoHvfKLj3wtP5aM4qZv7fV7Qcdgd1mrS2OyTlJ5ru/aC8vXvChTfLaH42+1dGdk0gKSGOm7pUZvUB4aYeadx/zWC2zRh7wnrNkbzqmd1G9GvLe3efy4Ef32DXhlV2h6P8RBOCH3g6hXS48jQhettdNdITrd2io6N48eZ+FC37mHkTH+HwoZOrD1Vos7UNQUTuA54Dahpjyv7rCqE2hEjmaVuDtw20kdSgG+yyj+YxasIcavW+kmbtOhETG2d3SMqNkGhUFpEGwJtAa6CLJgSlTmb3WJbSHM09xvdLN/DhkgP0uelxYuPj7Q5JueBNQrCzyuhF4AFAL/JKueHvOZMqonJCHOf3bMdzl5/C3Df+ybG83LI/pIKaLQlBRM4HdhpjVniw7UgRWSIiSyZOC77/FMpzxQPVymoE9nS7cBfICe4qokGtarx0VSc2ffQoP3/wHCHVlV2dwG8JQUS+F5E/XDyGAY8A//JkP8aYicaYrsaYriOH9fRXuCoAPL3bDea74kDy95xJvlQnNYWxN/VjVLdk5r/3DEVFRXaHpMrBbwnBGHO2MaZ9yQewCWgCrBCRLUB9YJmI1PZXLMp+nt7thspdsb+Fahfb7m3qc1efVOa985QmhRAU8CojY8wqY0yaMaaxMaYxsAPobIzZE+hYVOB4ercbbHfFdlVfhXIX2y4t6/LA2XWY9/YTFBUWlv0BFTR0HILyO28GqgXyrtiTi71d1VehPpalQ7M6PHxuA+a+9R8KCwrsDkd5yPapK6xSggpj3ix3Gchpn8uajTYQC+G4Ew7jKto1rsVjQ6N47I1/0ffm/xATE2t3SKoMWkIIAuHeq8bTu91A3hV70lYRbNVXoahVg5o8dWErFrz3lN2hKA/YXkJQ9q+b4G+e3u0G8q64rEnywnWCQjs0q5dKnbjVHNq3m2ppdewOR5VCSwg2i9ReNXaWily1VXz+w0KG3vvq8XhCuVE3GD1+xRms+fQZDu3baXcoqhSaEGwWTtUS3lzk7Rxr4OpiXzM2l/RdW4/HE8yNuqFYxZgQH8vjl5/G5iU/2B2KKoVWGdkonKol0jOyGXDnS6TI0TKrvuxsrIWTl6gsKCzicFYO44ck8uxcRzzB3KgbqlWMiZXiOLRrrd1hqFJoCcFG4VQt8drUORw+dIAn+lcus+rL7lLR9LGjj6+zsOT9f3PNeb0Y3bsmAzvUCfrzH8pVjNWTE+lZu5DdWzfaHYpyQxOCjYK5WsIb6RnZfPTtAm7qEkfNuDwGNRW3F9VgG4EbbPGUxe5kWlE1UypTVKjjEoKVVhnZKJirJbzx2tQ5xJs8buqURIExDG5muGO266ogb8YaBGLq50CPfaiIcKhirFYlgYPb11OvaSu7Q1EuaAlBVUhx6eCK9rEkxRVRo3IUMYW5bksJ3pSKAtHwHCylNE9HTYd6FePArs05+sdsu8NQbmgJQVXIpJkLiOcYH60qYvLKXKIkiiP5QEw+rfevP+ku29NSUaAanoOllOZJQ3HJxvBidfeefJ6DWbXkynaHoNzQhKAqZM6y9UTFJbL/0GGqV4omPaeImtWSqV+7ZoUutmUNHAsnnia/YEleKnxplZGqkOljR3PV4J7c3S+NFfc34+5+aVw1uGLdNkOtobeiQr2hWIUPTQiqQvxx8Q6HunJPRVryU8FNE4IqU2kNnv64eAdLQ28gRFLyAzDGsF+TXdDSNgRVptIaPP3R0BlJdeXh0lDsqRm/rada1wvsDkO5oQlBlaqsBk+7Lt6BGKMQCJGU/ACO5OaTUq++3WEoN7TKSJUqWBs87ZwcT5Xf9vQsYuPi7Q5DuWFbQhCRO0RknYj8KSL/sysO5V6wNniG8nw+kSw9I5vfM5NIq9/Y7lCUG7YkBBE5ExgGnGqMaQc8b0ccqnTB2uAZrKUWVbrsnDyS0+raHYYqhV1tCKOAZ4wxeQDGmH1lbK9sEIwNnuEwn0+kenLqctpc/ojdYahS2JUQWgK9ReQpIBe4zxiz2NWGIjISGAkw4YHLGDmsZ+CijHDB2OAZSpPRqRKSapBcLdXuKFQp/JYQROR7oLaLtx6xjlsN6A6cBnwiIk2NMabkxsaYicBEABa8ctL7KrIEY6lFlW3qz2tJqtva7jBUGfyWEIwxZ7t7T0RGAZ9bCWCRiBQBqcB+f8WjwkMwllpU6YwxzFy+kx6j7rU7FFUGu3oZfQmcBSAiLYE4IN2mWJRSfvT2tyuo0+tSu8NQHrArIbwNNBWRP4CPgWtdVRcppUJb9tE8Zv+5jwatO9sdivKALQnBGHPMGHOVMaa9MaazMeZHTz734pdL/B2aUsqH5q3cTN0eFxITG2d3KMoDITVSeW/tPvx3yq92h6GU8sDS9buYvOIIrTr3sjsU5aGQSgjNup7JxqhGbNqpzQ1KBbtXZ6+nz/X/JCo62u5QlIdCKiEAtOp9Pk9MXUZO3jG7Q1FKuTFx1gpSWvUkKirkLjERLeR+W1VTa9H2soe55bU5HMnJszscpVQJc1dsZnlhE9r202muQ03IJQRwJIX2l4/RRmalgkxBQSGT522gRXcdJBiKQjIhgCMp/HEgip37M+wORSkF5BcUMnrCjzQ87w6qVK1hdziqHEI2IYgIzc+6nC8XrLM7FKUi3rH8AkaN/4GGQ+6iVsPmdoejyilkEwJAo5btWR3dig9+/NPuUJSKaBNmLafhoNupWa+x3aGoCgjphADQ8dwrmb3mEDrQWSl7bN1zkJ83HSGtflO7Q1EVFPIJ
AaDJmSP432eL7A5DqYizadcBHvzkT/qOfJLoGF2iPdSFRUKo1/JUft+ezaHDR+0ORamIsW77fh7+bA19b35c10kOE2GREAC6X/dvbnvzF47m6oA1pfxt9da9/HvaX/S7+QmdpyiMhE1CqFwlmbQO/fnlz212h6JUWFu5aQ9PzNxKv5v/o9VEYSZsEgJAuz5DmPR7DgtWb7c7FKXC0rL1u3h29k763fgvnaMoDIVVQhARel/zIO/M2Wh3KEqFnUVrd/LC3H06YV0YC6uEAI6kUKVtP16ZrtNaKOUrC1Zv55VfDtLnuod1wrowZstvVkQ6ishvIvK7iCwRkdN9uf82vc5jhWnGsnVadaRURc1ZuZWJi4/Q+5oHERG7w1F+ZFeq/x/wH2NMR+Bf1s8+1ar7AMZ/t5Zj+QW+3rVSEeP75ZuZtCqfHlfco8kgAtiVEAyQbD1PAXb5+gBVa6TR9IJ7uG38j+Qdy/f17pUKaxt3pnPp2Nl8ubMaZ1z2D00GEcKuhHAX8JyIbAeeB8a421BERlrVSkvmTf/Iq4Ok1mlI84se5NbxP+mCOkp5aN32/fzzi3WceftzdDj3SrvDUQEk/poDSES+B2q7eOsRoD8w1xjzmYhcCow0xpxd1j7fmLepXMEe2r+HP6f8l/G39qNygg6iUcqd1Vv38sSMzfS98TEdYxAmaqckMLRDXY+KeH5LCKUeVCQTqGqMMeIoi2YaY5LL+lx5EwJA5oH9rPzwKV67pQ9JlXWYvVLOjuTk8dvq7by16BB9b3hUu5WGEW8Sgl1VRruAvtbzs4AN/j5gSo2adLz6UW6ZMI/DR3L8fTilQkZ6RjY3jp/HV9mt6KsDziKaXQnhZmCsiKwAngZGBuKgVarWoOs1j3HrhPk6EZ5SwN6Dhxn99kJ63vQk7Xqeq2MMIpwtVUblVZEqI2dHsw7z2zv/4tHh7WnVsBbR0fqfQEWWvGP53P3WfI4mpNH14juIr1TZ7pCUn4RClZGtKldJ5owbn+CVPxO5/fUfyC8otDskpQImNy+fW8f/RKOhd9Pz6gc1GajjIrYbQaXEKnQ+dwT7d3TnhnH/xxnNq3PbkM52h6WU3xhjeOHLJSzbepgOlz5EtbQ6doekgkxEVhm5smnZPBL++obR53WkRkqivw6jlC3e+34VP2zIplHPC2jYtqvd4agA8qbKKGJLCCU17dyHnUkp3PnpLE6pksWFPVvSsn6q3WEpVSF/bN7DZws2kF6jC71vvsTucFSQ04TgpF7LDtRr2YH03Tt4ctZ7pEX9yUMXdSG1apLdoSnllZ37M3h+2u8ciq1F67Pv5NRade0OSYWAkKoy+mL5joAGm3Mkm7mTnueUuomMHt4jkIdWqtyemzKXTYeK6HvNvcQnVLI7HGWz1KR4ereoGbwjlUOdiIw0xky0O47SBHuMGl/FaHwVo/G5FpHdTn0gIAPpKijYY9T4KkbjqxiNzwVNCEoppQBNCEoppSyaEMonaOsenQR7jBpfxWh8FaPxuaCNykoppQAtISillLJoQvCAiEwRkd+txxYR+d3Nduv1BVMAAAbSSURBVFtEZJW13ZIAx/hvEdnpFOdgN9udKyLrROQvEXkogPE9JyJrRWSliHwhIlXdbBfQc1jW+RCReOv3/5eILBSRxv6OyenYDUTkJxFZIyJ/isg/XGzTT0QynX7v/wpUfNbxS/19icP/WedvpYgEbMIwEWnldF5+F5HDInJXiW0Cev5E5G0R2Scifzi9Vl1EZovIBuvfam4+e621zQYRudYvARpj9OHFAxgL/MvNe1uAVJvi+jdwXxnbRAMbgaZAHLACaBug+AYAMdbzZ4Fn7T6HnpwP4Dbgdev55cCUAP5O6wCdredVgPUu4usHzLDjb86T3xcwGJgFCNAdWGhTnNHAHqCRnecP6AN0Bv5weu1/wEPW84dc/d8AqgObrH+rWc+r+To+LSF4wVru81LgI7tjKafTgb+MMZuMMceAj4FhgTiwMeY7Y0yB9eNvQP1AHLcMnpyPYcB71vOpQH/r78DvjDG7jTHLrOdZwBqgXiCO7UPDgEnG4TegqojYMc1qf2CjMWarDcc+zhgzDzhY4mXnv7H3gAtcfHQgMNsYc9AYcwiYDZzr6/g0IXinN7DXGONuyU8DfCciS0XEjoElo61i+dtuip31gO1OP+/AngvMDTjuGl0J5Dn05Hwc38ZKaJlADT/HdRKrqqoTsNDF22eIyAoRmSUi7QIaWNm/r2D5m7sc9zdydp4/gFrGmN3guAkA0lxsE5DzqJPbWUTke6C2i7ceMcZMs56PoPTSQU9jzC4RSQNmi8ha647A7zEC44EncPwHfQJH1dYNJXfh4rM+62bmyTkUkUeAAmCym9349RyW4Mn58Os584SIJAGfAXcZYw6XeHsZjmqQbKvd6EugRQDDK+v3FQznLw44Hxjj4m27z5+nAnIeNSFYjDFnl/a+iMQAFwJdStnHLuvffSLyBY4qCZ9dzMqKsZiIvAHMcPHWDqCB08/1gV0+CA3w6BxeCwwB+hurYtTFPvx6Dkvw5HwUb7PD+htI4eQiv9+ISCyOZDDZGPN5yfedE4Qx5msReU1EUo0x6YGIz4Pfl1//5jw0CFhmjNlb8g27z59lr4jUMcbstqrT9rnYZgeO9o5i9YE5vg5Eq4w8dzaw1hizw9WbIpIoIlWKn+NoRP3D1bb+UKJedribYy8GWohIE+uu6XJgeoDiOxd4EDjfGHPUzTaBPoeenI/pQHGPjouBH90lM1+z2ireAtYYY15ws03t4jYNETkdx//pAwGKz5Pf13TgGqu3UXcgs7h6JIDcluztPH9OnP/GrgWmudjmW2CAiFSzqoMHWK/5VqBa10P9AbwL3FritbrA19bzpjh6qawA/sRRTRLI+N4HVgErrT+wOiVjtH4ejKO3ysZAxgj8haMO9Hfr8XrJ+Ow4h67OB/A4jsQFkAB8asW/CGgawHPWC0e1wEqn8zYYuLX4bxEYbZ2rFTga63sEMD6Xv68S8QnwqnV+VwFdAxWfdfzKOC7wKU6v2Xb+cCSm3UA+jrv+G3G0Sf0AbLD+rW5t2xV40+mzN1h/h38B1/sjPh2prJRSCtAqI6WUUhZNCEoppQBNCEoppSyaEJRSSgGaEJRSSlk0IaiIISKFJWa/bFyOfVQVkdt8H93x/bcWkV9FJE9E7vPXcZRyRbudqoghItnGmKQK7qMxjtkx23v5uWhjTKEH26UBjXBMcHbIGPN8eeJUqjy0hKAimohEi2OthsXWxIC3WK8nicgPIrJMHPP9F8+C+gzQzCphPGfNpz/DaX/jROQ66/kWEfmXiPwMXCIizUTkG2siuPki0rpkPMaYfcaYxTgGLikVUDqXkYokleTvxY02G2OG4xgpmmmMOU1E4oFfROQ7HKOqhxtjDotIKvCbiEzHMV99e2NMR3AssFLGMXONMb2sbX/AMUJ2g4h0A14DzvL1l1SqvDQhqEiSU3whdzIAOFVELrZ+TsEx2+UO4GkR6QMU4ZhquFY5jjkFjs9Y2gP4VP5eTiG+HPtTym80Iah
IJ8AdxpgTJgqzqn1qAl2MMfkisgXHvEYlFXBi1WvJbY5Y/0YBGS4SklJBQ9sQVKT7FhhlTTONiLS0Zu5MAfZZyeBMHA29AFk4lrMsthVoK461l1NwrMx1EuOYZnmziFxiHUdEpIN/vpJS5aMlBBXp3gQaA8usaZD34+jhMxn4ShwLx/8OrAUwxhwQkV/EsUj6LGPM/SLyCY4ZSTcAy0s51pXAeBH5JxCLY8nOFc4biEhtYAmQDBSJY1H4tubkhXGU8jntdqqUUgrQKiOllFIWTQhKKaUATQhKKaUsmhCUUkoBmhCUUkpZNCEopZQCNCEopZSyaEJQSikFwP8DVO/mX+tyPDwAAAAASUVORK5CYII=\n", @@ -613,7 +555,7 @@ " cluster_std=[2, 2], random_state=2)\n", "\n", "# Training a classifier\n", - "svm = SVC()\n", + "svm = SVC(gamma='auto')\n", "svm.fit(X, y)\n", "\n", "# Plotting decision regions\n", @@ -647,14 +589,6 @@ "execution_count": 15, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n", - " \"avoid this warning.\", FutureWarning)\n" - ] - }, { "data": { "image/png": "iVBORw0KGgoAAAANSUhEUgAAAmMAAAIZCAYAAADqcTjaAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzs3Xd4VNXWwOHfmkkDAqGE0KT3Ik1A6QqoiIBiB1HwuzZUrgWvvV6xXrvYwGtBBHtFFBQFbCCISpEmvQVIICEBElL298dMuJOQMklmZk9Z7/PkMTlz5uyVSFbW2WcXMcaglFJKKaXscNgOQCmllFIqkmkxppRSSillkRZjSimllFIWaTGmlFJKKWWRFmNKKaWUUhZpMaaUUkopZZEWY0oppZRSFmkxppSKeCJiRKSVD6/3pohMrmx7IjJeRH70VVxKqeCkxZhSEUhE+onIzyKSLiL7ReQnEekpIr1F5JCIVC/mPb+LyA0i0sxdTCwv8nqiiBwVkS0B+0aUUioMaDGmVIQRkRrAbOAFoDbQCHgQyDbG/ALsAM4v8p5OQAdglsfhau7jBcYAm/0YulJKhSUtxpSKPG0AjDGzjDF5xpgjxph5xpgV7tffAi4v8p7LgS+NMakex94GxhU5Z3ppDYtIHxFZ6u6RWyoifTxeWyAiD7l76TJEZJ6IJJZwnVNFZIeI3CYie0Vkt4icKyLDRGS9u7fvLo/ze4nILyKS5j53iojElHDtfiKyXUROc3/dTkS+cV9znYhcVNr36CHR/b4MEVkoIk1LaC9BRKaLyD4R2Soi94iIo/Ap8oL7Z7ZWRAZ7vDBeRDa529gsIpd6GZtSKohoMaZU5FkP5InIWyJylojUKvL620B/EWkC4C4MxnB8oTUDuEREnCLSHqgOLCmpURGpDXwJPA/UAZ4GvhSROh6njQGuAJKAGODWUr6P+kAcrp69+4BpwFjgJKA/cJ+ItHCfmwfcDCQCvYHBwHXFxHgmrt6/840x34tINeAbYKY7ptHASyLSsZS4ClwKPORu8w/gnRLOewFIAFoAA3EVtVd4vH4ysMl9nfuBj0Wktju254GzjDHVgT7udpRSIUaLMaUijDHmINAPMLgKmH0i8rmI1HO/vh1YiKuwAVfhEoerkPK0A1gHDMHVQ1ZqrxhwNrDBGPO2MSbXGDMLWAuM8DjnDWPMemPMEeB9oGsp18sBHjbG5ADv4ipWnjPGZBhjVgOrgc7u7+k3Y8xid7tbgFdxFT6eLgSmAsOMMb+6jw0Hthhj3nC/dznwEXBBGd8ruHoSFxljsoG7gd4i0tjzBBFxAhcDd7rj3gI8BVzmcdpe4FljTI4x5j1cP/Oz3a/lA51EpIoxZrf7+1ZKhRgtxpSKQMaYNcaY8caYE4BOQEPgWY9TPB9VXgbMdBc9RU0HxuPqMZpRRrMNga1Fjm3F1bNVINnj88NAfCnXSzXG5Lk/P+L+7x6P148UvF9E2ojIbBFJFpGDwCO4ijdPNwHvG2NWehxrCpzsfryZJiJpuHq86pcSV4HtBZ8YYzKB/bh+Bp4ScfUAev5civ5MdhpjTJHXGxpjDuEq5K4FdovIlyLSzou4lFJBRosxpSKcMWYt8CauoqzAx0Aj97ip8yi51+sjXL00m4wxRQutonbhKm48NQF2ljfmCngZVy9ca2NMDeAuQIqccyFwrojc5HFsO7DQGFPT4yPeGDPBizaP9YKJSDyuyRK7ipyTgquHz/PnUvRn0khEpMjruwCMMXONMacDDdzf3zQv4lJKBRktxpSKMO4B6ZNE5AT3141x9WwtLjjH3evyIfAGsNUYs6y4a7nPGwRc6UXTc4A2IjJGRKJE5GJcMzRnV+ob8k514CCQ6e49Kq6Y2oXrkew/RaRgPNlsd8yXiUi0+6One4xcWYa5JwPE4Bo7tsT9CPgYd8/e+8DDIlLdPcj/Fgr3Mia5Y4oWkQuB9sAcEaknIiPdY8eygUxcY+OUUiFGizGlIk8GrkHhS0TkEK4ibBUwqch5b+HqsSl1LJgxZpkxZmNZjbpnYg53t5MK3AYMN8aklPs7KL9bcU0OyMDVe/ReCTFuw1WQ3S4iVxpjMoAzgEtwFWvJwONArBdtzsQ14H4/rkkFJc10nAgcwjVI/0f3+173eH0J0BpXL9rDwAXun6UD189yl7uNgRQzKUEpFfyk8FAEpZRSSikVSNozppRSSillkRZjSilVASKyWkQyi/nQhVeVUuWijymVUkoppSzSnjGllFJKKYu0GFNKKaWUskiLMaWUUkopi7QYU0oppZSySIsxpZRSSimLtBhTSimllLJIizGllFJKKYu0GFNKKaWUskiLMaWUUkopi7QYU0oppZSySIsxpZRSSimLtBhTSimllLJIizGllFJKKYu0GFNKKaWUskiLMaWUUkopi7QYU0oppZSySIsxpZRSSimLtBhTSimllLJIizGllFJ
KKYu0GFNKKaWUskiLMaWUUkopi7QYU0oppZSySIsxpZRSSimLtBhTSimllLJIizGllFJKKYu0GFNKKaWUskiLMaWUUkopi7QYU0oppZSySIuxCCAiW0TkiIhkenw0rOQ1TxWRHb6K0cs2bxKRTSJyUER2icgzIhIVwPZbi0iWiMwo5RwRkcdFJNX98YSISKBiVCocaQ6rdLs3iMgyEckWkTe9OP9mEUkWkXQReV1EYv0dY6TTYixyjDDGxHt87LIZTAUT0BdAd2NMDaAT0AX4p08DK92LwNIyzrkaOBdXbJ2B4cA1fo5LqUigOazidgGTgdfLOlFEzgTuAAYDzYAWwIP+DE5pMRbxROQUEflZRNJE5E8ROdXjtStEZI2IZLjv5q5xH68GfAU09LxLFZE3RWSyx/sL3Xm6725vF5EVwCERiXK/7yMR2Scim0WkxMRkjNlojEkruByQD7Ty6Q+kBCJyCZAGzC/j1HHAU8aYHcaYncBTwHg/h6dUxNIcVjZjzMfGmE+BVC9OHwf81xiz2hhzAHgIzWF+p8VYBBORRsCXuO6YagO3Ah+JSF33KXtx9ezUAK4AnhGR7saYQ8BZwK4K3KWOBs4GauJKRF8AfwKNcN2J3eS+Mysp5jEichBIwXVX+Wop56aV8nGHl/EiIjWAfwOTvDi9o/v7KfCn+5hSysc0h/lFcTmsnojU8VN7Ci3GIsmnHr/En7qPjQXmGGPmGGPyjTHfAMuAYQDGmC/dd3LGGLMQmAf0r2QczxtjthtjjgA9gbrGmH8bY44aYzYB04BLSnqzMWamu4u/DfAKsKeUc2uW8vFYOWJ+CNed4nYvzo0H0j2+TgfiddyYUpWmOaziOaw8isthANX91J5Ci7FIcq7HL/G57mNNgQs977aAfkADABE5S0QWi8h+92vDgMRKxuFZ0DTF9ZjAs/27gHplXcQYswFYDbxUyXgKEZGvPB5bXCoiXYEhwDNeXiIT1114gRpApjHG+DJOpSKQ5jAvFM1hFbhEcTkMIKPy0amSBGwmmgpK24G3jTFXFX3BPXvmI+By4DNjTI77brSgh6e44uIQUNXj6/rFnOP5vu3AZmNM64oEj+vfb8uSXhSRzFLe+4gx5pHjgjPmrCLXuAnXINZt7s6teMApIh2MMd2Lue5qXI8efnV/3cV9TCnle5rDigZXJIdVQEEOe9/9dRdgjzHGm/FmqoK0ZyyyzQBGiMiZIuIUkTj3gNUTgBggFtgH5IrIWcAZHu/dA9QRkQSPY38Aw0SktojUB24qo/1fgYPuAbFV3DF0EpGexZ0sIleKSJL78w7AnZQyoL7IzKuiH8clsRJMxZUsu7o/XsE1RqWkMSHTgVtEpJG4pt5PAt70si2lVPloDvOCe6JBHODEdTMZJyXPBp0O/ENEOohILeAeNIf5nRZjEcw9BuocXN3q+3Dd5f0LcBhjMnBNuX4fOACMAT73eO9aYBawyd093xB4G9dgzy24xma8V0b7ecAIXEXOZlwDWl8DEkp4S19gpYgcAua4P+4q7/ddHsaYw8aY5IIPXF34WcaYfQAi0r/I3euruAb0rgRW4SrcShygq5SqOM1hXrsHOIJryYqx7s/vARCRJu5Hmk0AjDFfA08A3wNb3R/3ByDGiCY6lEUppZRSyh7tGVNKKaWUskiLMaWUUkopi6wWY+La/2q1iKwSkVnuAYZKKRUSNIcppXzBWjHmXjn5n0APY0wnXLM8SlwoTymlgonmMKWUr9heZywKqCIiObjWdil1O4ppizbpbAOlIkj7BjXo1zoxmHcv8DqHzVudbLamHg5YYEopu6KcwhV9m3uVv6z1jLk3UX4S2AbsBtKNMfOKniciV4vIMhFZtujzWYEOUymliuVNDvPMX3M+eNtGmEqpEGDzMWUtXOvDNAcaAtVEZGzR84wxU40xPYwxPQaMHB3oMJVSqlje5DDP/DXswstshKmUCgE2B/APwbWNxD5jTA7wMdDHYjxKKVUemsOUUj5hsxjbBpwiIlXFtenfYGCNxXiUUqo8NIcppXzC2gB+Y8wSEfkQWA7kAr/j2gewXARDQnQ+cU5wb+QcVIwxZOVBeo4DQ/DFp5SqGF/ksGDPX6A5TKlAsDqb0hhzP5Xc8yohOp+a1eLIlygIxmRmDHEmFw5lkZbjtB2NUsqHKpvDgj5/geYwpQIg5Ffgj3MS3IlMhHyJIk5zmFKqiKDPX6A5TKkACPliTESCO5EBiATtIwillD0hkb9Ac5hSfhbyxZhSSimlVCizvQJ/2Fj243e8/Ph95OflMfS8MVx85UTbISmllFc0f4WfR28YTWZmxnHH4+Orc+cUXUA92ERUMXbj5aNIP3jwuOMJNWrw3PRPKnzdvLw8Xnz4Lh6Z+h6J9Rvwz0vO4pTTzqBpy7aVCVcppY7R/KXKIzMzgxZXvnDc8U2vaaEdjCKqGEs/eJDWV0857viGqTdU6rrrVv5OgybNaNC4KQADzzqHX76fq8lMKeUzmr+UCl86ZswHUvcmU7d+o2NfJ9ZrQOqeZIsRKaWUdzR/KWVfRPWM+Ysx5rhjOvMo9OmYCxUJNH8pZZ8WYz6QWK8B+5J3Hvs6Zc9uaifVsxiR8gUdc6EigeYvpezTYswH2nbqyq6tm0nesY069eqz8KvPuP3xl2yHpZRSZdL8FZ7i46sXe+MYH1/dQjSqLBFVjCXUqFHsYNeEGjUqdV1nVBTX3fUId187mvy8PM4YdQnNWungV6WU72j+UuWhQylCS0QVY5WZ/l2WXgMG02vAYL9dXykV2TR/KRW+dDalUkoppZRFEdUzplR56JgLpZRSgaDFmFIl0DEXSimlAkEfUyqllFJKWWS1GBORmiLyoYisFZE1ItLbZjxKKVUemsOUUr5g+zHlc8DXxpgLRCQGqGo5HqWUKg/NYUqpSrNWjIlIDWAAMB7AGHMUOGornsp4+t6bWbLoG2rWTuTVTxbYDiek6RZEKlSESw7T/KWUfTYfU7YA9gFviMjvIvKaiFQrepKIXC0iy0Rk2aLPffPHOP1AKg//cywH0/b75Hqnn3MRk1+e6ZNrRbqCLYiKfhRXoCllWZk5zDN/zfngbZ80qvlLqfBj8zFlFNAdmGiMWSIizwF3APd6nmSMmQpMBZi2aNPxO9pWwHefvkP+rj+Z/8kMRl3xz0pf78QevUneud0HkSmlQkiZOcwzf81bnWy2ph6udKOav1Q4i9SnIzaLsR3ADmPMEvfXH+JKZH6VfiCV37/5kBfPO4HrZ3/I4FFjqVGztr+bVUqFn4DnMM1fKtwVPB0pqrg1H8OJtceUxphkYLuIFGyCNhj4y9/tfvfpO4xoBa3rVWFEK5j/yQx/N6mUCkM2cpjmL6XCk+3ZlBOBd9yzkDYBV/izsYK7ytsuTgBgzEkJjH5P7y6VUhUWsBym+Ss4RepjNeVbVosxY8wfQI9AtVdwV1knPhpw/bfg7tIXYy9U5ekWRCqUBDKHaf4KTpH6WE35lu2esYBa+esP/LA7i1krdhQ6XnPfD5VKZo/eNoEVS3/mYNp+xg7uztjrb2XoeWMqG2
5E0jtJpYqn+SuyaI9bZImoYuy+lz/wy3XvfOJlv1xXKaUKaP4KLempKdw9fvhxx70tpsK5x620QjNSn45EVDGmlFJKBUK+yQ/bYqqySis0H35ztoWI7NNiTEUM7fZXSoUjzW2hL+SLMWMMGAMitkMpmTGuOJVV4dztr0JTSOQv0BxWipIeq4nJD1gMmttCX8gXY1l5EGdyyScqOBOaMThMLll5tgNRSgWboM9foDmsDCX1PBU3XkypkoR8MZae44BDWcQ5QYIwmRljyMpzx6mUUh6CPX+B5jBbInUge6QK+WLMIKTlOCHHdiTBSccSKBW8NH+Fr8oWU/7Ozzb/NmihebyQL8ZU6XQsgVJKBV6w3+za/NsQ7D8bG7QYUxGjuLuxtH3JiCPquPEd2nOoVPjKSNvPu//5F6Nve5L4hFq2w6k07WkKfVqMqYhRXHF19/jh2nOoVIRZ+tV7RO1Zya9z3mXQ6Am2w6k0vXEMfToiUymlVMTISNvPukWf8NSoRqxb9AmZ6Qdsh6SU9oyp8KOTFpQKX5X9/V761XuMaA2tkqowovWhSvWOaa5RvqLFWJiLxLEEOmlBqfBVmd/vgl6x+y9OAGB09wTGvPcJvYZdcmzsWHkKrFDONZH4tyGYaTEW5vTuTCmlXAp6xepUiwZc/x3RmkK9Y6FcYJWH/m0ILlqMqYimd4dKRY4Nv//E73uzeG/FjkLH45N/sjaQXx91KgiCYkxEnMAyYKcxRvePUAHlTbLTZKlKE045LC83lzW/zCU/r/AqtHWbtqNB83aWovKda56YYTuE40RKT5wqnfViDLgRWAPUsB2ICpxQKnA0WaoyhHwOS9m1lVVfvUVCbgoX921FUs1qhV7/9s/3+HFOGo5aJ9DxzMtJqFPXUqSqLKGUW9X/WC3GROQE4GzgYeAWm7GowPJngaOPHlWghHoOyzqcydJPptI6eh8vjO5G7RrVij2vfbP6AOxOSefJTx7lT0cSfcbcQlR0TCDDBYLr9zuYYimgN4+hyXbP2LPAbUCJ/3JF5GrgaoCxkyYzYOToAIWmQpXe/akAKjWHeeavG+9/go6Dzg9gaKXbs30zqz99ngfOO5G2Tdt69Z4GiQk8ddVgNu1K5YFXJlGrQz86Db7Yz5EWFkz7JoZ6rtFetOBhrRgTkeHAXmPMbyJyaknnGWOmAlMBpi3aZAIUnlJlSk9NOW4bJdBEFim8yWGe+Wve6mSzNfVwACMs2abli3Csmc0rV/UlIb5Kud/fomEdpt88lE9+Xs+7r9xLv8tvJ65qvB8iDbxI+t3VXrTgYbNnrC8wUkSGAXFADRGZYYwZazEmpbyWb/I1kUW2kMxhfy+dT60dC7hj3MBKX2tUnzb0a5/BxKl30eb0y2jcsacPIgx95elx8sWjTs/2DqTsZcUU18xQZ1xVOl75VHlCV5ZYK8aMMXcCdwK47ypvDfYkpoJHILvXS0qWYvJ92o4KLaGYw9b9MpeGKb9wy4Wn+OyadWtVZ9akM3hw5sdsysmmRdd+Prt2qCpPj5Mv8pVnezu3bCA2sQkAu968qdLXVoFhe8yYilCVvRsMZPd6ScmyuEeUSgWrNT9+SfODy5g4yve9VyLCA5f24+F3v2bxpr/ode6VOBy69bENTqeT7JRtAORk7j+WE3UCU3ALimLMGLMAWGA5DBVAkTQuQ4W/YM9ha3/4gtaH/2TCyB5+befuS3qzZM0Onn9jMgOuuEcLsiLS9iV7Nc60Mj3/9Ru3OPZ5dmIS8fHVyczMIDMzo1DbWpwFl6AoxpRSSvlHyu5t5KxfyIRrTgtIeye3P4HbY6N49LUHOfUf9+FwOgPSbigw4iixR7/ouK/o+NpA4XFfFen5L+0pQjAuzRGptBhTqoI0kalgt2/nZjZ//hwvXhuYQqxA5xb1uWeYMPm1Bxj4j/txRgXmT00oL9VgY9xXsP9MIokWY0p5KZQTvYo8uTlHWfbeM7x782BiogOf6js2q8cDIx3cP+1eBl75YEAWiA2WpRpKulFziO8e23rmo7R9yfz22MXH2kiok3gsjuJylgo+WoypkGSjVypYEr1S3vjh9X/z1GU9rBRiBdo2rsvD5wl3Tb2XgVf9m+iYWGuxBFIgJv2Ulo8efnO2X9pU/qPFmKoQG71E2jOllHf++GoG53WpTfMGdWyHQstGiTxxcSfumP4Eg66813Y4PudtXnr0htEcSNnLzi0bCp3n1DF1Ci3GVAXZ6CXSnimlyrZu6QK6RW3mgn7dbYdyTNP6tRnVIZkFX75Ft7PH2Q7Hp7zNS5mZGcQlnkDq7KcLHc/J3E/jZi0LFXS+XJ5Cx7aGBi3GVMhZ/dok8rL+t61MTuZ+7h4/XHvIVMTbs30jR//8jCuuGmQ7lONcNKADUT+tY+5n/6XHOf+wHY7PeZOXilsNf9NrE7lzyiwevWF0sUVT42YtK5XXNCeGBi3GVMjJyzpMw/HPHvs6O2UbjZq11h4yFdGyjxxmxSdT+O81/XA6g3N9r/P6tiV68Qa++Phlep43wefXL+gFSk9NId9jhwwx+X6/YSualw4nb8QZFc32d+/h7vHDjz2idDqdhdYCK+BtXAWPRYs+8izpuio0aDGmlJe0u18Fsx9nPs1/xnSnerU426GUasQprYmO2sgH77/AyRfegIj47NoFBc3d44cHwZAGITaxCdHxtWlx5QusmDKB2MQmxx4/VlTBY9HVr00q9MgzJ3M/h92LvKrQo8WYCmpFF0LcuWUDubk55B7NJirAM7O0u18Fq01/LqZPvaM0rV/bdiheGdqjJdFRm5nx7rP0vuQmnxZkwST3aDa5uTnH8lbO0WzycnNJ3r6p0r1YRR95Fp1FqUKLFmOqQgLVS+Q5OLbgTjD/SAZH9+8kz72QpNPpJHn7Jg6k7D1uGreOI1PhLuvwIbZ//zaPTDrLdijlMrhrc6Kc23j9nSfpe+mtIVuQFeTCnMz9x/V6GUCcUcQmNsFZpQbJ79xGXuYBROBwYtKx93urYEbmiimFH/E646pSpdLfibJJizFVITYKnII7wRVTJuCMiqJRs9bHXtu5ZcOxxwGedByZCneL3nyEF//RPySLmYEnNiEmagcvvfUY/cfdEZLfg+ej0aI5yVOjSx8FXCvqx8dFVagXKzMzg/qXTD62On+BXW/eBHHH/znX5YBChxZj6jjB/gvsjKtK8rv3kO2+swTXI8y4xBMC0n6w/3xU5Phr8bcMb1eFpFqhO06od/sTiHIIz70xmf7j7w7ZzcWLPi04kLIXiauOs0oNazFlZmZwhJhCszwBDqRs5NEbRvssX2lOrDwtxtRxgn09r45XPlXsKtNH4Lju+5zM/T5NOhD8Px8VGbIOZ5K65GMuv2WY7VAqrWfbRtzqdPDE6w8x8Ip7Kr25uI3JNkVzzKM3jGb7lo1IVHSh/SWdcVWBoxVux3MNsgI5mfuJT2xZ7PlFZ3mCawZ65rfPVziGojQnVp4WYypsBCLpKBUsFn/wIg9c1DMkH+0Vp2urBtzldPDQ1Hs56fyJ1EpqUOFrBUNvzJ1TZvllVmdxA/+zE5OC4
ntWFafFmApqRe9w0/YlY8SBQxyFBuun7UsmqnqijRCVCrjdm9fStUYGrRt3sh2KT3VqXo8Xxlbl7hmP03T4ROo1Lr63J1TocjjKW1qMqaDhzbiDku40f398dLGzmXTfNxWOVs15gzeuPtl2GH6RVKs6L08YxAPvTOX3Wh3pNuxy2yFVmK97q7S4C1/WijERaQxMB+oD+cBUY8xztuJR9lVm3EFCHVevmOdspmPvr3xoSh3HVg7bvuZ3+jaNo1qVwK6zF0hRUU4mjxvAZ79s4N0Xb6PL+TdSp34jn1w7I20/7/7nX4y+7UniE2r55JoVUZFB7+VdpT9tXzJ5xnA4+X9Z0BkVpTepQchmz1guMMkYs1xEqgO/icg3xpi/LMak0LuvsujPR7lZyWHr5s9k+nV9/dlE0Dind2v6dWjIIx88zeY6nThp+PhKj5Fb+tV7RO1Zya9z3mXQaN9vyeQtfw56L7o+Y9rXrnGzOZn7qVWB9c3KYnMbqnBhrRgzxuwGdrs/zxCRNUAjQIsxy0L1lyZQRVKo/nyUb9nIYZtXLubUVvHExkT7q4mgUyehGk9dOYhFq7Yx5bmbOOHkYbQ9+fQKLYGRkbafdYs+4cVRjbh+9if0GnaJ1d6xQPBcqd9fq/QH1zZUoSkoxoyJSDOgG7CkmNeuBq4GGDtpMgNGjg5obCp0aJGkbCkph3nmrxvvf4KOg86vVDtbFn3EQzcMqNQ1QtWATk3o37Exn/68klnPzqbTeRNp0KxNua6x9Kv3GNEaWiVVYUTrQ9Z7x4KFrhNmX4nFmIicCEzDdaf3FXC7MeaA+7VfjTG9fBGAiMQDHwE3GWMOFn3dGDMVmAowbdEm44s2lR2++IX3d++XJqXwEQw5zDN/zVudbLamHi7mCt45sHc3LWpH4XSG5qKoviAijOrbnmE9W/HkR2/w/dcO2gwZTaNWZc8qLegVu//iBABGd09gzHv2esfSU1OOW6W/4Hig6Tph9pXWM/Yy8ACwGLgS+FFERhpjNgI+6SMXkWhcSewdY8zHvrimCl5l/cJ7U2j5uyDSpBRWwiqHrfz6bZ45r6s/mwgZsTHR3D26H0eyjzLl83eZPy+Xep360q7PUKKiiv9fW9ArVqea6/U61aIZ0RprvWP5Jv+4bY0KjqvIU1oxFm+M+dr9+ZMi8hvwtYhchmv/00oR1yjM/wJrjDFPV/Z6KnQkb99EXl7esa8LNviOj69e7HiGR28YfdyaYgVrjRXMogTtvVLHCasc5ji4i8SaHf3dTEipEhvDvy7sTVZ2Dj+v2cL0V24lttlJdBs6BmdU4T9vG37/id/3ZvHeih2Fjscn/2SlGBOTX2hlfs/jlVXeJwjB1EsXqUorxkREEowx6QDGmO9F5Hxcd4G1fdB2X+AyYKWI/OE+dpcxZo4Prq2CWF5eXqE7woINvkvqfSraW7ViygQajn+W7JRthZay0N4rVUTY5LCt61bSr7UvQg5PcbHRDOragkFdW/DTqm289uokanceQrv+I46dc80TMyxGWNijN4z6qN9pAAAgAElEQVTGiIM6w28pdNzpdHJ47jOVvn55b0p91UunM80rrrRi7HGgPa4ufgCMMStEZDBwb2UbNsb8CITHPh4qIun4sqAXNjls+69zuPHc8g1Wj1R9OzWhb6cmfLBoDe8/M49m/S+gTY+BZb7Pm99nX/3OZ2ZmEB1f+7gCqOii1eURDPlI817FlViMGWNmlnB8G3CV3yJSKkTo+LLgFk45TNJ3UbdWeG195G8XDmjPeX3bMv3bH/j6lfn0u/x24qpWK/F8b36fg/l3vjKx+fORqfJOUCxtoSJDQRf2gZS9RMf/75GLM66qxagK0252FWz2J++gVb2SiwhVMqfTwRVnduPsXhn867XbaDx4PE079rQdFuDKe0ULoJzM/TRuVng/zkD0eNWsWz9oi8xIocWYCpiyFgYMBtrNroLN9rW/c0W3xrbDCGlJtarz5s1n8cynX/LLih855ZKbjlvJPz8vl63v3EXDUbcTXTXB7zF5LsZaYNNrE4/LQYHsjVv92iTysv63/EpO5n5dQT9AtBhTAVfe3qei5+dmpLB1yuU4xEF2kdmUSoWbjI1L6XJqD9thhDwR4ZZRPfn5r+288vbj9Lvs9kIFWf7hNBo69pG+fA6J/SJrcfGCHJuVspf6l0w+dtzpdFK/cQvtIQuAMosxEWmDa72eesaYTiLSGRhpjJlcxlsjWsru7axb9Bl5KZupU6NKodeMgV0Hc0ho1IrWfc+mdlJDS1HaUd47rJLOL9p9n5mZoXdx6jihnMOMMciRNKKidGNnX+nToTFRDgdT3nyEvuPuxOFwkJG2n/i8gzw0vAUTv/yGnO7D/No7FmzDITyfWnjOUFeB403P2DTgX8CrcGw20kwg6BOZDbs2rGTNvLfp3jCGfw9pR+Ok04rd2NYYw/odKXywcArfpRhanHoRJ7TqSFR0jIWoQ5PtwbTBllBViUI6h9WvGWc7hLDTq10jbnYKT74+mQFX3M3Sr95jVPtoGpq9DKufxTuvXktUdVevu+fvs69+5/1xs6j5KLR5U4xVNcb8WqSgyPVTPCHr0ME0ln7wAu0Tsplxfb8y72RFhLaN63LP6LpkH83h3YVfMXfuf2k2eCzNTzwlQFGrytDet5ARsjls21/LaNeopu0wwlK31g25w+nk/pfuYvPq5dx/SRvqVIvm+qQcfkpP57LH3j5umyR//M77aoC+5qPQ5k0xliIiLXGvWC0iFwC7/RpViNm6eimb58/guf/rQ2LN+HK/PzYmmnGnd+HyIZ156Yuv+XbhR7QdMprG7br7IdrAC4b1b1REC9kclp68lb4dGtgOw6qUtEyueWwGU++8jDoJvp1VemKLejRz/MAhSaNOtbpA4LdJ8sU2cSXR3Bs6vCnGrse10W07EdkJbAYu9WtUIWTdL3NJ2vMTb0wcREx05eZDiAjXj+zJlUdzePbzT1n8xyJ6XnDDcdt62FbeX/DMzAyOEFNolg7AgZSNPHrDaE0Kyt9CNocdSkslOqpK2SeGself/syB5O28Nfsnbrn0DJ9f/+9tyazbd5SuT6ynerWqpO9PwRhDtnmJ+XO/PHaerQKmMm2WdyiHPuq0p9S/8iLiAHoYY4aISDXAYYw5/q9whPrrh9m0yPiNiRec7NPrxsZEc/sFJ7NiUzLPFLOth20VGauVl3WYhuOfLXQsO2Ubmd8+7/P4lCoQ6jnMeXAnzRr4Nr+EkpS0TGYvXMrL5yUyYfZSxg3v6/Pesc+fugGAW/+7kNpDruPZu2+I2DW39MbYnlKLMWNMvojcALxvjDkUoJhCwl8LPqVN1iomjPTflPPOLerzxo31+e+8P1nw5ir6XDoppAf45x7NLrQ7c15uLgdS9la4d0zv4lRZQj2HRfosyulf/szwVg7aJsUyvFWW33rHAB4d14+Ln3oKYyq9h3ylJW/fxIGUvdw9fnih4/p4MXx58/zrGxG5FXgPOJbMjDH7/RZVkFs5/0M6s54rh3cLSHv/OKMLg3ancveUm2h3zkQatGgfkHZ9
zQAS9b9iUhxRRMfXLvaRpzc0KSkvaQ4LQQW9Yu9f5Lq5urx7NS563z+9YwDRUU5uGdaOb3/6zefXLq+8vDyi42sf10MXCb1zkcqbYuz/3P+93uOYAVr4Ppzgt2Leu/SI2cK4IV0C2m7zBnWYMeksJr/7Nkv/bErPUdf4vA0d7Pk/+rMIK5rDQlBBr1hivOvPVGJ8FMNbOfzaO9bvxGY48rLJTt9HbEJdv7RRVHE9/AdS9hKXeEJA2lfBocxizBjTPBCBhILkrRtI2LecceMGWmnf4XBw35i+zPttE6+/MIne4++navUaPru+v9btio+vzoGUjeTs34k4/vdPThyOSl3Xn2yvYaZ8R3OYf/hzliPAguXr2bU3m5kr9xY63nDPer8VYwAJNaqT+utnNDz9Sr9c35sbPdeWccdvl1ReOpQjdHizAv/lxR03xkz3fTjBa++OTSTPe5nnrz7NdiiccVILerauz7/evIe6p1xAi+4DAtp+eX/B75wyi0dvGM32Dx8otEE4FGwSftQfYSoFaA7zF3/PciwYWB9o9WpWZdOqb1m79idiqvxvqSJfFTCButHT3v3Q4s1jSs8t7uOAwcByIGIS2dHsLP78eAr/vaYvTmdw9ObUqlGV1/55Ji98sZCf3l5M70tvxRGgnqaK/CLfOWVWiRuEa2+T8rOIz2G+FohZjrb8+vL15ObmMfrpeQy56fmA5VVf09790OLNY8pC/+dEJAF42xeNi8hQ4DnACbxmjHnMF9f1tR9nPMUTo7tSvVrwbUsycUR3/vh7Nw+9cBu9LrmZ2vUa2Q6pRJ49aumpKeSbfADE5B+bNaR3bcrXNIf5XiBnOdoQFeWkd8va7NuxmXpNWga8fW+fPpTW+6VCS0VWEz0MVHonURFxAi8CpwM7gKUi8rkx5q/KXtuXNq1ayilJ2TRrUMd2KCXq2qoBb19fh39OfYI6PUfSqudg2yEVy7PI8qaXTLvZlZ9EVA7ztUDPcrTlyqGdmfDWW9S7+t8Bb9vb/Ka9X+HDmzFjX8Cx5aEcQAfgAx+03Qv42xizyd3Ou8A5QNAksuwjh9k67w0evvUs26GUqWpcDK9eP4Tp85fw29xkupxZ/gXGg22wp81EE2w/C1VxkZzD/MHGLEcbalSrQtOqRzh4IIUatRKtxODLG9LVr00iL+swOZn7C61fpje3wcGbnrEnPT7PBbYaY3b4oO1GwHaPr3cAxy01LSJXA1cDjJ00mQEjR/ugae8seutRnv9H/5AZM+B0OrjijC5UWfgXn0+7nz6X3UFsnPdbqegv5P/ozyKsWMthnvnrxvufoOOg88vVgMnPr2SIvmdrlqMNY09tx7OLPqPnOf/w2TXj46uz/NELMVL474pDHIUWwH70htFs37KR+pdMLnSe0+kkc+4z5W63YBeU7JRtNGr2v45h7UULDt4UY8OMMbd7HhCRx4seqwAp5thxSx8bY6bi2leOaYs2BWxp5DVLvuesVrHUr+O7pSMC5ZKBHRjYMZ2bp95Nx5HXUr9ZG9shKWWTtRzmmb/mrU42W1MPF/OWkmXF1mHH3gOckFSrwkH6mq1Zjja0a5LEoc8W+vSa3k5myszMIDq+NrGJTQqdk52yzat2ivbu52TuJztlG05nZO/qEKy86fI5vZhjvnhutwNo7PH1CcAuH1y30rIOH2LvL+8z/syutkOpsAaJCUy9pg+ZP0xjy6pfbYejlE0hm8Oq123IkewcX15SlYPD4aBd3Wj27w2KP03lcueUWTz85uxjH7USk2jUrDX1G+tax8GoxJ4xEZkAXAe0EJEVHi9VB37yQdtLgdYi0hzYCVwCjPHBdSttyYcvcd9FvRAp7sY3dNSoVoXHxg/k/nc+4c99u+l86sig+Z6CbUyWThYIP+GSw4Jgq8Sg5O9FZwv0ad+Qz9f8Ru2khn5ro6KCLY+qiivtMeVM4CvgUeAOj+MZvtjTzRiT697Ady6uaeGvG2NWV/a6lbV781o6x6fTpkkH26H4hIjw4KV9+eKXtXww/XH6X357UBRk3hQ4gUw0OispLIV8DqvXuitfLJ3OpCZJvrxsWPD3orMFBndrwVuvrwRG+Oya6akp7Nyyodjj5aE3iuGjxGLMGJMOpAOjAUQkCdeCifEiEm+M8e7BdSmMMXOAOZW9jq/k5uaw6rOXeeN6O9sd+YuIMLJPe+rX3skLbz5Mv3F3hcSkBE00qjLCIYclNW7JpkVZ/rp8yAr0orN56XvLPqkc8k3+cWPBCo57csZVZdebNxU6lpO5n8bNyr/2WWVvbvXpgX95s7TFCOBpoCGwF2gKrAE6+je0wFv53adMGNKaalVibYfiF73aNWJSlINHn7uJ7mNuD+oFYpXylVDPYXvTyjfoPxIEetHZpJrez0r3hpj844qsguMF4uOrQ2YGxBX+Mx2f2LLCu6BUhj498C9vZlNOBk4BvjXGdBOR03DfaYaTnOxs0v9awKlnBf+aYpXRtVUDpt+YyPWvPEHLc28hsWFT2yEp5W8hm8NEhKga9WyHEVR8veisN2PPkqoK+3Zto27D43uzKqJm3fplFjba2xRZvCnGcowxqSLiEBGHMeZ7EXnc75EF2MrvPmTi2Z2DYjyVv8XGRPPShEFc/8oz5I/4J0kn6OwaFdZCOofl5efZDiGo+HrRWW/Gnp3YpBY/pe33qhjTx3mqIrwpxtJEJB74AXhHRPbiWjgxbGRnHSFz/c+cMmKY7VACJiY6ipcnDGLi1BfIP2NCxK9FprOSwlpI57Coem1Y8fdOOrfSYQXg20VnyzP2zHhZFIfK4zwtGoOLN8XYOcAR4CbgUiABCPxmXX608pv3uHlEF9thBFxUlJMp1wzipmmvYAZfTYPm7WyHZI0mn7AW0jmsSs26ZB1Nsx1G0PDlorPejj3r27EJ73/4DS07neSTdoPh5i9UisZIUWYxZow5JCJNgdbGmLdEpCquadxhIfvIYY5sXkb3UeE9VqwkTqeD564+jRtefoW4UbdTK6mB7ZCU8qlQz2FN2p/EJ7O/pVcHHd/pS+UZe1arRlWi8N3WVKF48xcMBWQ482Y25VW49larDbTEtR/bK8Bg/4YWGH98/Q63nxu6K+37gsPh4MExpzDxzSfpd+VDxFapajskpXwm1HNY9Zq12W3CfyxroJV37JmJ8NV3Q7GADCXeLDZ1PdAXOAhgjNkAhMUKhNlHDpO38086ttDeoLq1qvPcuB78MO1e26Eo5Wshn8P2pIXeWmMpaZmcf8crpKYfsh1KIQVxzVuyhpkrs+nx4t5jHzNXZrNg+fpi35ef4du1xpTy5M2YsWxjzNGCWYYiEkUxG3qHouVfvMEdo7rbDiNo1Ktdg6tOa857H75Irwuutx2OUr4S8jksOqGu7RDKLVAr5JdXQVzDTx9YrrjqJhz/xKC4QfBp+5L5/fHRJNRJLHRcH+ep0nhTjC0UkbuAKiJyOq693r7wb1j+Z4whZ+/ftG06xHYoQWVIt+YcOrKWb+Z/RJfB59sOxxqdaRRWwjKHBbNAr5AfiLhyc3PJzc0hKir62LHSBsE//OZsn8X
tDzoGLLh4U4zdAfwDWAlcg2vrj9f8GVQg/PXT11zQq7HtMILSyN5t+frFeRzsNpAatRPLfkMY0plGYSXkc1ieM47k1IPUr1PDdiheCfQK+YGIa2jnesz7YzHte/T3c5SBoTeVwaXEYkxEmhhjthlj8oFp7o+wse/P7xgxcYDtMIKSiPDw5f24/rXJ9L3qER3Q72Pa6xYY4ZTD6nbqz6rNP4ZEMebrFfKDJa5GiQlkrNgM2C3GvM0fmmdCS2k9Y58C3QFE5CNjTNg8s9r59yq6NIgNic2ybaldoxq3j+zEc5++Sp/RN9sOJ6xor1vAhE0OE0JnNqWvV8j3VlnbGlU2rhNbNiRn/lKfx11e3uYPzTOhpbRizPO3P6z2y1m/6FNeHhPZy1l4o2urBnRauYOtqxbTtNMptsMJanoXGpTCNocFM1+ukF8eZU0Y8EVc0UdSyc/P1xt55XOlFWOmhM9D2qGMdOpJGgnxVWyHEhImjerJ2KdnUrdpB6pWD/5HJLboXWhQCsscFux8uUK+t7wZmO+LuC7u15LPf5rLif1di4TrIHjlK6UVY11E5CCuu8sq7s9xf22MMSH5l3nT0vlccIreJJfHvy8+iXs+nMJpV9xlO5SA0SQbFsIyh6njBWrCQP8Tm/Hys1/Tqd9QRER7vZXPlFiMGWP8tl2IiPwHGAEcBTYCVxhjArL52t41i+k3JLRmw5Q1FsLfWjRKpF+DbWxcvpCW3QcGvH0bNMmGvnDNYaqwQE4YiI2JZkS3eqxbsZgWXXr79NoqsnmztIU/fAPcaYzJFZHHgTuB2/3d6MEDKTRLcITc8/5gWDzxuuHdue7Fj0hv2oGEOqG3AGUw0V63sGAlh6njBXrCwJhTOzL+uZk0atuV2LjAD3fxNn9ongktVooxY8w8jy8XAxcEot1NS7/jsl6htdluMC2e+Pj4gVz36mS6XHqPFmSVoL1uoc9WDlPH83Zgvq+eMMRER3HPhd2Z/PbjDPy/e3E4A7vnvLf5Q/NMaLHVM+bp/4D3SnpRRK7GtckvYydNZsDI0RVuKHXdEk4ZNqjC77chmBZPrF4tjlcmDGTCqw/R6eK7qFW3vpU4gpHehUa0EnOYZ/668f4n6DgoZFfXCFreDsz35ROGDk2TuGtoPo/890EG/N99OKOC4U+pCmV++xckIt8Cxf21vtsY85n7nLuBXOCdkq5jjJkKTAWYtmhThWdEpafuo1Wd6LJPDCLBuHhitSqxvDrhNCa8/AjtLryDOvUaWokj2OhdaPjxRQ7zzF/zViebramHKxTL0axDSFzorDUWbPzxhKFT8/rcN9zJA9PuY+BVDxbaJkmp8vLb4CljzBBjTKdiPgqS2DhgOHCpMcbv0863b1jNkE6h1ZNT2lgIm6rExvDqdYPY9MkTrP3xS6uxKOUvwZTD9q9eRP/OOgu8ogo/YfBdDm3XpC6TR7Vh0Qs38feKX31yTRWZrIxkF5GhuAa7jjTGVOxWsZwO/LWQ3p2aBaIpn1mwfD0zV2bT48W9xz5mrsxmwfL1tkMjNiaaqTeczkn8xfwXbmXTSk1EKnIEOodFRzmJidZHYWVJScvk/DteITX9UKFjsxcu5fLurp6wy7tXY/bCpYXOqYxWJ9Rlxi1DabJjDt+8ch8H9u72yXVVZLH12z0FiAW+ERGAxcaYa/3a4qEDxMaEVjeyjcUTy2vMqR24ZEA7ps75ivkL3qNmi66c0KUfSY2a4f5/q1Q4CnwOU2UqblxYIGZbOp0Orh/Zk4vTMnl+9vMsO+Cg27nXktigsU+ur8KfrdmUrQLZ3sEDKTSpExvIJiOKw+Hg2uE9uMYY/tyYzC+rpvPb14c4mBNFldr1iU9qQsMOvYiNq0J8Qi3b4SpVaYHOYYczdAmzspQ0LiyQ2zMl1ozn32P7kZ55hKc/fZHv0qJoc/pYGjZrHfBZlyq0RES/9/bVy7isVzPbYYQ9EaFrqwZ0bdUAgJzcPHJy81i6fidr/vwvm/dkcMjEkX4om9ik5tSo35RGHXpStXqCDn5VqhQN47WXuSwlzTy38YQhIb4KD47tT+bhbN75/gPmf76HhOZdadv/HF0WSBUrIoqx9G1/0aBjaA3eDwfRUU6io5wM7NyMgZ2bHTuel5dP+qEj/Lp+PX/9sJjluzM5Qhw1W3WnXZ+ziK1S1V7QSgWhUFuoOtCCceY5QHzVWK45uztXnZXPxp0pzPz2KZalGJI69qF9v2F6E6qOiYhiLCbvEA3rJtgOQ7k5nQ5q16jG0B6tGNrDdexoTi7L1u3kg1n3sO9oDPHNutJxwAjiqtpLpEqp0BDoVfjLy+Fw0LpxEvdfmkRWdg5L1u3g7ddu52hcIh2HXk6teo10jG2EC/tizBhD1sH9tsNQZYiJjqJPp6b06dSUvLx8VmzcyTvv38eOIzF0GDqOBs3b2Q5RKWsCsPpPSAvkuLDKiouNPva0IDX9ENO+fplvt2XQtN8o2vQ8zXZ4ypLwL8by82lUM8Z2GKocnE4H3do0plubxmQezmba1zOZ98Vh2g8dR+M2J9oOT6mA+vvPxXRvVtN2GH7hqy2KQmHmeXHqJFTjjov7kJeXz0c/LuXjZz+mXo9hdOw7VHvKIkxEDETQm8rQFV81lpvPO5m3rutLtVXv8cOMJ0lL3Vv2G5UKE9mHM2jTIDyLMc+lKCKZ0+ngooEdmTXpTAbF/MXCKTez6fcfbIelAijsi7ENyxbQu20922GoSqoaF8PtF/XmsbMbsvGjR1mz6HPbISmlKsFzKQpfLsLqT8UtKutLIsI5fdrxzs2n03TfIuY+P0kXkY0QYV+MHUlPoVOTOrbDUD7SsG4Cr1w3hO6s5bvXHuJodpbtkJRSFeCvLYr8KZA9edcO68qM6/vz98dPsPanOX5vT9kV9sWYCk9jB3XkyfNb8MPUe0hL2WM7HKVUOfh7iyJ/sNGTFxcbzdQbhtA9fzULp95HztFsv7ep7NBiTIWsRnVr8vz4Hmz/9DG2rfnNdjhKKS+VthRFsLLZk3fpaR14+LzWLJp2LwcPpASsXRU4WoypkJZUqzrPXTMY8/sHbF21xHY4SikvLFi+npkrs+nx4t5jHzNXZrNg+fqAxVCe8V/B0JPXtH5tnrm0K3+//xA71v0ZsHZVYIT90hYq/IkID13WjwdnfsqW/Hyade5tOySlfCYqOpYdqZn0sB2IDwXDUhTFbSpe2rm+WlS2Mst5NEhM4KXrTueON99hW34uTdqfVK73q+ClPWMqLIgID1zaj20LZpGZfsB2OEr5TJuT+vPDun22wwgr5R3/5cuevMpOAhARHhs/QJ8GhBntGVNh5cVrBvCPF+9lyE3P635+KiyIiP5b9rGXPlxA2oE0EqvV8KqHy1c9eZ5F4ITZFd870/NpwOa8XJp36euT+JQ9+huuwkqNalW47dwu/Dj9cd1CRoWNg4d0CRdfSUnLZNbcn0mIyeetpWkBHf/ly0kABU8Dqm6Yw9+/LfBdkMqKiCjG9I9yZOnZpiFnNod1v8y1HYpSPpEVE54r8PuaN4PyX/pwAbEmm1dGVGP26oOISE
BmR/prEsA9l/Sh5pZv9ZFliLNajInIrSJiRCTRX200aHcSny7Z5K/LqyA1dnBn0v/8iuwjh22HosJYIHIYgKNKTXbs1bGQZfFmPNbH3//Gac0c5Jl8utSDni/sCMhMTn8u5/Gv83ux9bt3dKu4EGatGBORxsDpwDZ/tlO3UXNSM3WhvEh084iuLPtwiu0wVJgKVA4DqNuhL2u36R/a0ngzKD8lLZPa1aK4e2hjOjRvyN1DG9MsqQZzp/zL7zM8/bmcR1SUk1evO41VMydzYF+yD6JVgWZzAP8zwG3AZ35tRYQ9aUf82oQKTh2bJdG6ygZ2b91Ig6YtbYejwk9gchiQdEIzvp/7EUN6tPV3UyGr8HisrGIH5ftyiYry8nexVzUuhlevG8Qlzz3KWTc+jcPp9Gt7yres9IyJyEhgpzGmzJXrRORqEVkmIssWfT6r3G05HA6c1etWJEwVBm46pwcrP3/FdhgqzHibwzzz15wP3q5wewl16rJ9v/bwl8Tb8VjBsNisP8XFRjP5wi4seuMh26GocvJbz5iIfAvUL+alu4G7AK9uQ4wxU4GpANMWbarQSPwjOflkHMqierW4irxdhbBqVWJpWkM4dDCNajV0ELTyni9ymGf+mrc62WxNrfgYRkedJuzal07DugkVvka48rbHKxgWm/W3Ts3rMaxtKssWfEqHU8+1HY7ykt96xowxQ4wxnYp+AJuA5sCfIrIFOAFYLiLFJT2fqN32ZNZu082kI9X1Z3Vi5dwZtsNQISaYchhAk55DWbByqz+bCFnh3uNVXpee1oGoLYvYt3OL7VCUlwI+ZswYsxJIKvjancx6GGP8tvtp8y69+fDT+fRs39RfTagg1rxhHbL3LLcdhgoTNnIYQP0mLVj4XQpjBvmzldAUCT1e5fXwuIH833P/4ZRr/kNslaq2w1FliIh1xqpVT2Bneo7tMJRFvZrVYtemtbbDUKrComNiScuvytGcXNuhqBAQEx3Fg6N7sWTWU7ZDUV6wXowZY5r5+44SILZeK7bsTvV3MypInXZiQ3at094x5XuBymEArQeex0uzfwtEUyoMtGyUyOktY1jz81e2Q1FlsF6MBUrTXmfx8c9/2w5DWdKpRQNykiNz/IgKH03bd2PZtkO6q4jy2mWDOnLw9684eCAg9wuqgiKmGGvQtCW/bc+wHYayRERwHE4lPz/fdihKVUqtjv2Zt1x3FVHeEREeHHMyv73/rO1QVCkiphgDiKvflr937LMdhrKkboIOYlWhr13voXy2ZLPtMFQIaVS3JgObxfD3il9th6JKEFHF2Ilnjublr/+yHYZSSlVYTGwch6qeQHLqQduhqBDyjzO7su3bN8g5qosHB6OIKsaqVU9ge6ZwOOuo7VCUUqrCWvcfzlvzV9kOQ4WQqCgnt53bleVfTrcdiipGRBVjACeOvIYnPlxiOwyllKqwek3b8tvufNIzdd9d5b1ubU6gRvp69u3Ux9zBJuKKsfpNWrDhYIwmMaVUSOs88ipenP277TBUiLn/kpNZ9eUbtsNQRURcMQbQacRVTH73Z9thqABLSa/4voBKBZt6jVvw+17DgYP671p5LyG+Ch1q5rBr8zrboSgPEVmMJTZozP6EDiz+a5vtUFQA5VWtg8MRkf/kVZjqdu61TNHeMVVOt5zXi7Vfv2U7DOUhYv8y9RwxnmdmryIrW7dJigSrNu3CmdjCdhhK+VTdhk1YkSLsP3jIdigqhFSNi6FbPQfb16+0HYpyi9hizBkVRZthV/HcZ8tsh6ICYOaiv2nf/2zbYSjlcyedfz3PfKZbfWl5mccAACAASURBVKnyufm8XqydqzMrg0XEFmMATdp1Ze3Ruvy2fpftUJQfHck+yvrUHKrXrGM7FKV8rk69hmw4GMu+A7rDiPJedJSTXo3j2LNdZ1YGg4guxgBOuWgikz/7i0NHdCG8cPXkh0vocPZVtsNQym9OuuB6nvhEe8dU+Ywe2I7V38yyHYZCizEcDgf9xt3NtS8vJONQlu1wlI/9tn4XG2lE41btbYeilN/UrJPErrwEdu1Ltx2KCiGN6tbkBOd+0lL32g4l4kV8MQZQo3YiHS68lQdm/mQ7FOVDqzbv4ekFe+h94fW2Q1HK704aNYH7Z/6MMcZ2KCqEjB3Qhr8Xz7MdRsTTYswtsUFjoruM4t+zftJkFgYyDmXxyMd/0Hv0LbqchYoI1WvWpkrH0/ny1w22Q1EhpEvrRhzcpI+4bbP2V0pEJorIOhFZLSJP2IrDU8vu/clqM4L739GCLJQt+HMLV7z6Cz3GPUBslaq2w1FhKhhzWKeBI5mxaKPmL1UuberGcGBfsu0wIpqVYkxETgPOATobYzoCT9qIozjNu/Qm/8TzuOPNhWQe1kH9ocQYw1MfLWH6ylzOmPgfqtesbTskFaaCNYeJCK2HXsF/PtL9d5X3Rvdvw/qf5tgOI6LZ6hmbADxmjMkGMMYE1ejBph17Un3AlVz5yo/sTtEBsaFgw/a9jHlqLultz6X3JTciIrZDUuEtaHNYk/YnsXRXHilpmbZDUSGiXdN6mAO6I41NtoqxNkB/EVkiIgtFpGdJJ4rI1SKyTESWLfo8cFNw6zVtS7+rH+amGb8z97dNAWtXlU/m4Wxu/e9CJn+fRr8J/6Fxu+62Q1KRwasc5pm/5nzwdsCCGzD+Lm55/YeAtadCX3RWKvn5+bbDiFhR/rqwiHwL1C/mpbvd7dYCTgF6Au+LSAtTzEAHY8xUYCrAtEWbAjoQIjauCqdd+yifzZvFr2t/4PaLehMT7bcfmSqHP/7ezbRv15JuqtHx7Otp27Cp7ZBUmPFFDvPMX/NWJ5utqYHZ1DuuajzV2/Tm62UbGdqjZUDaVKGtfq1qZBxIJaFOXduhRCS/VRbGmCElvSYiE4CP3YnrVxHJBxKBff6Kp6KcUVF0G3YZuzavY+yzU7i0bzPO6dPOdlgRxxjDnv0ZzFywhhW7DhFVvx1dxz5EbFwV26GpMBXqOazr0Et5/flbOL1bc5zO8JpRnJKWyTWPzWDqnZdRJ6Ga7XDCwiX92zDl9wV0GXKh7VAikq3f0E+BQQAi0gaIAVIsxeKVhs3bMvjG55h/uBXXTJnHqs068yQQvv9jM4+//zPjXvie2+fs4fBJV9Dn2ic5edRVWogpm0Iih3U8azyT3w+/wfzTv/yZA8nbeWu2rg3pK1FhVrCHGlvP3F4HXheRVcBRYFxxjyiDjcPh4MRTzyGr12CenTuL3E+/5sbhJ9KtdSPboYWFZet2sHVPGgvXpnAk13DIUYO6rbtSv/c59G/Q2HZ4SnkKiRzWsHVnlq1ewsKV2xh4YhPb4fhESlomsxcu5eXzEpkweynjhvfV3jEV8qwUY8aYo8BYG237QlzVeE4edRXZRw7z6vwPyfz8Kyae1YGTO+i4JW+kph9i2bod/L5lP3sP5ZNyMAtHfCJV6jWnTvM+tBnTSnu9VFALpRzWbfgVvPjivzipZT3iq8baDqfSpn/5M8NbOWibFMvwVlm8Nfsnbrn0DNthhbwgvJeIKDoavRJiq1TlpOGXk
3P0Yt6Y/xHPfbOApgkOrh3aiab1dY2rAkeyj/LtbxuYv3ofew8bomskUa/zadQ8pR7tG4bH3bpSwcoZFUWXUdfx0Puv8fj4AbbDqZSCXrH3L6oOwOXdq3HR+9o75guzl26hXpeQuL8IS1qM+UB0TCwnnTUGGENG2n7+/c1MjqSspn5cLhec0pRTOjaLyC15Nu7YxxOfLOdQdG2Sugym/eV96Oh02g5LqYiT1KQ1yXU7M+fXjQzrFbqzKwt6xRLjXX+6EuOjGN7Kob1jPrAtJYN2jUP330ao02LMx6rXrM0pF94AQPaRw7zzy7e8+vMyHFlpNK5ThfNPbkHrxnWpGhdjOVL/Sc88wqMf/sq+2KZ0vuxhqsZXtx2SUhGv85mXMv3F2+naPJGGdRNsh1MhC5avZ9febGauLLzGbsM967UYqwRjDHuOOOkQgZ0GwUJC6TlxoNcZ87WD+1PYuvJnDmz4DUf2QRrXiuG8k5vTpVWjsJl6vmPvAW595w86nTuBenqXpSqpfYMa9GudGBbbKQRynbGSHM3O4scXb2bGpGFhk3NU5X38418szO1Ep35n2Q4lrEQ5hSv6Nvcqf2nPWADVqJ3IiQNHwsCRGGPIPnKYaT/M5vD8xcTkH2Zg+yTO7tmKWjVCc3Pr2Us28Pbi3Qy8+mGiY0N/oLBS4SYmNo5mp1/BlM9nc+Ook22Ho4KAMYb3F29n8E3/sh1KRNNizBIRIa5qNbqfeTFwMbm5OaxZu5y5788nNiuF0X1b8P/s3Xd4VNXWwOHfSq8EQkINvUmTKkgHEUEEuVxRARXlqiCKXrFexd6uDStK87Og2KUoRUEEUZESUASkCUhvAQIppO/vj0m4IaRMMuVMWe/zzGMymTlnDcjKOvusvfclbet7zR6LW/Yc4eNNWVwy7r9+2R+nlLdo2LoTa3dvYvmGv+ndpr7V4SiLvfjVGpr0v9HqMPye/tb0EEFBwdRv1ZmeNz1Mh1teYG5SbUZO+pbvEv/y+P3Ctu49xlPf7KTHdRO0EFPKC7QbeANTlu9lz+ETVoeiLPTbjgNszahK3eYdrA7F7+lvTg8UFBTMhX2G0nP8q3yb0oib31jCjv0et7g3AH/uOcJT83fR65YnCQoKtjocpZQdgoKC6T76MR79eDU5OblWh6MscPRkCi98s5UO/xhjdSgKLcY8WlBQMC16DKLtTc/y7OKDTF30u9UhnSMjM5snPk2k182PExikd7yV8iZhEZE0HTqBxz5cYXUoys0OJZ3irvfX0v3WZ3SBbQ+hxZgXiIiKptuoh9hVqRO3TV5MSlqG1SGRnpHFbVOW0fGGRwnQtcOU8krV6zQitdbFfL5ii9WhKDfZf/Qk98z63VaIhXvnZDFfpMWYF2nSuR9Nhz/ObdN+YuueI5bFkXYmk3FTf6TliIlUqVbTsjiUUo5r3XcYczcmc/DYKatDUS729+ET3PfJRnrc8rSOiHkYLca8THTlWDrf/DTPLjvBd+t2uf38qemZ/OfdZTQdOoHKVau5/fxKKedrP+zfPPLxKjKzsq0ORbnIx8v/5OGvttFrzNO69JAH0mLMC4VHRtPz+vv48q9Anv30F7fNtkxOSWfs1BVU7TeearV1U3SlfEVM1XgSLhvLS1+usjoU5WQZmdlM+moVa3Ka0GfsMwQF++7uL95MizEv1umq28lpdx2jXlnEnkPHXXquXQeSGDfjFy4a/SS1GzZz6bmUUu5Xp0lLDlXpwDertlkdinKSgt7eE03/Scve/7A6HFUKr9oO6acdx7wnWDfKysrk6ynP8vjoAcRWdv4+kJmZWTz6wVIuGT6OSN1nUrlRzZhwGleL8o6Vj8vw58HT5nhaptVhlCo3N5dfPnuLidf3sToU5SBjDA+8/indR9xNbLy2lFghMEDo2si+7dy8qhjzByIyxhgz3eo43MXfPi/oZ1a+yx//nv3tM/vb5wX3fGa9Tel5/G0FPn/7vKCfWfkuf/x79rfP7G+fF9zwmbUYU0oppZSykBZjSimllFIW0mLM8/jVvXj87/OCfmblu/zx79nfPrO/fV5ww2fWBn6llFJKKQvpyJhSSimllIW0GPMwIvKEiBwQkd/zHwOtjslVRGSAiGwTkb9E5D9Wx+MOIvK3iGzM/7tNtDoeVxCRd0XkqIhsKvRcrIgsEZEd+f+tYmWMynX8JYdp/tL85UxajHmmV40xbfMfC60OxhVEJBB4C7gcaAGMEJEW1kblNn3y/247Wh2Ii7wPDCjy3H+ApcaYJsDS/O+V7/LpHKb5S/OXs0+qxZiySifgL2PMLmNMFvApMMTimJQTGGNWACeKPD0E+CD/6w8A3ZtFeTPNXz7KqvylxZhnGi8if+QPl/rq7ZzawL5C3+/Pf87XGWCxiKwTEX9aPLG6MeYQQP5/dX8W3+brOUzzl+Yvp9JizAIi8r2IbCrmMQSYAjQC2gKHgEmWBus6xe3X5Q9Te7sZY9pju71xh4j0tDogpcpLc5jmLzR/OVWQ1QH4I2PMpfa8TkRmAPNdHI5V9gN1Cn2fABy0KBa3McYczP/vURGZg+12xwpro3KLIyJS0xhzSERqAketDkhVnOYwzV+av5xLR8Y8TP5fdIGhwKaSXuvl1gJNRKSBiIQAw4GvLY7JpUQkUkSiC74GLsN3/36L+hq4Mf/rG4F5FsaiXMhPcpjmL81fTqUjY57nRRFpi23I+29grLXhuIYxJkdExgPfAYHAu8aYzRaH5WrVgTkiArZ/ex8bY761NiTnE5FPgN5AnIjsBx4Hngc+F5Gbgb3A1dZFqFzM53OY5i/NX04/r67Ar5RSSillHb1NqZRSSillIS3GlFJKKaUspMWYUkoppZSFtBhTSimllLKQFmNKKaWUUhbSYkw5jYjkisjvhR71K3CMyiJyu/OjO3v8C0TkVxHJFJH7XHUepZR30fylrKRLWyinEZFUY0yUg8eoD8w3xrQq5/sCjTG5dryuGlAP20avJ40xL1ckTqWUb9H8paykI2PKpUQkUEReEpG1+RsHj81/PkpElorIehHZmL+nHdgW12uUf2X6koj0FpH5hY43WURuyv/6bxF5TER+Bq4WkUYi8m3+JrY/icgFReMxxhw1xqwFsl3+4ZVSXk3zl3IXXYFfOVO4iPye//VuY8xQ4GbglDHmIhEJBX4RkcXAPmCoMea0iMQBq0Tka+A/QCtjTFsAEeldxjkzjDHd81+7FLjNGLNDRDoDbwOXOPtDKqV8kuYvZRkdGfMz+VdjZ0QktdCjloPH7J2/bcQZY0zb/MfQ/B9fBozKT3KrgapAE0CA50TkD+B7oDa27TbsPefd+e95VEQOishkoCvwRf65pgE1SzuGHecIEZEv8//MTFmJVURiRWSOiKSJyB4RGenI+ZVS59L8VTEi8nh+Ditxg3cRqS8iy0QkXUS2lvZa5Xw6MuafBhtjvnfTuQS40xjz3TlP2obq44EO2Paw+wsIK+b9OZx70VDwmm+Ae4D2QB4wB8guuCJ1op+B14Av7HjtW0AWtqTcFlggIhv8YM86pdxJ81c5iEgjYBhwqIyXfgL8
CgzMf3wpIk2MMcecHZM6n46MqbNE5GIRWSkiySKyofBIkIiMFpEtIpIiIrsK9U5EAouAWkBkwZWqiLwvIs9g20h3nIj0FZH9ItI0/z2TgARgHZCOrSm1GvAm0FxEdovIXcAeoIWIhIpIDNAXwBizE1sSA1vCzAbSROTq/LhERNo48udhjMkyxrxmjPkZKLW5Nv8zXQU8aoxJzX/P18ANjsSglLKP5q8STQYexHahWCwRaYqtMHzcGHPGGPMVsBFbTlNuoMWYAkBEagMLgGeAWOA+4CsRic9/yVFgEFAJGA28KiLtjTFpwOXAQSDNGBNljDlY6NDvAH8CM7CNGE3DNiKbhm30yACzgK3A/wFrsY1CZQFPAS2Az4E/8l/3W6FjRwK7gSSgDfAv4GYR2QBsBgqaaslP0MkickpE8oDHgZdEJE9EHnfkzy5fUyDXGLO90HMbgJZOOLZSqhSav0r8c7kayDLGLCzjj7AlsMsYk1LoOc1fbqS3Kf3TXBHJyf96uTHmH8D1wMJC/2iXiEgituHqD4wxCwq9/0exNbH2ANYXPFnctHBjTB7wcP7rPzLG9AHITyh3GmPezf++M/CFMeapgveKyEPAcGPMaOCBYo4dn/+6JsAo4DdjzIDiPrAxpnKZfyqOiQJOFXnuFBDt4vMq5W80f9lBRKKA57D1vZWlpPxVu7znVRWjxZh/+kcxPRf1sE2vHlzouWBgGYCIXI7taqwpthHVCGzD2I7YV+T8tUQkudBzgcBPZR0kf/bRZmyzj/7pYEwVlYrtqruwSkBKMa9VSlWc5i/7PAl8aIzZbcdrNX9ZTIsxVWAftn+4txb9gdimdH+F7eptnjEmW0TmYut1ANtQfVFp2BJegRrFvKbw+/Zhm07epCLBY/t/uVFJPxSR1FLe+5wx5rkKnrfAdiAov+F1R/5zbbDdblBKuZbmr/P1BRLkfzsCxAOfi8gLxpgXirx2M9BQRKIL3apsA3xsX/jKUdozpgp8BAwWkf5iW+gwTGxTvhOAECAUOAbk5F9lFh76PgJUzW9QLfA7MFBsyz3UAO4u4/xrgNMi8qCIhOfH0EpELiruxSJyi9hWo0ZEWgAPAUtLOnh+L0hJjxILsfzG24IZUCH5fy5S9HX5vSezgadEJFJEumHr+fiwjM+tlHKc5q/z9QVaYetta4utL24stlnfRY+/Pf8zP57/ZzcUuBBbEavcQIsxBYAxZh+24uFhbElrH3A/EJB/pXQXtkbUk8BIbDMFC967Fdu06F35Taa1sBUhG4C/gcXAZ2WcPxcYjC1pFDS1vgPElPCWbsBGEUkDFuY/Hi7v57bDNuAMtt6J7/K/rgcgIg+LyKJCr70dCMfWLPwJME6XtVDK9TR/FRvTcWPM4YIHthnhJ40xqQAiMlVEphZ6y3CgI7Y/o+eBYbqshfvo3pRKKaWUUhbSkTGllFJKKQtpMaaUUkopZSFLizERmSAim0Vkk4h8UqhRWimlPJ7mMKWUM1hWjOWvmHwX0NEY0wrbmizDrYpHKaXKQ3OYUspZrF5nLAgIF5FsbGu6HCztxZ+s2auzDZTyI02qRdGxfux5S4l4ELtz2E87jpn9J8+4LTCllLWCAwMY1iHBrvxlWTFmjDkgIi8De7EtF7DYGLO46OtEZAwwBuD6e5+h55Uj3BuoUsoyGdl5Zb/IIvbksML569+Pv0jLS3TfZaX8RVCg/deRVt6mrIJtXZgGQC0gUkSuL/o6Y8x0Y0xHY0xHLcSUUp7CnhxWOH8NvPoGK8JUSnkBKxv4L8W2fcQxY0w2ttXLu1oYj1JKlYfmMKWUU1hZjO0FLhaRiPztZfoCWyyMRymlykNzmFLKKazsGVstIl8C64Ec4DdgenmPIxhigvMIC4Ritgy0nDGGjFw4lR2AwfPiU0pVjDNymKfnL9AcppQ7WDqb0hjzOPC4I8eICc6jcmQYeRIEnpjMjCHM5EBaBsnZgVZHo5RyIkdzmMfnL9AcppQbeP0K/GGBeHYiEyFPggjTHKaUKsLj8xdoDlPKDby+GBMRz05kACIeewtCKWUdr8hfoDlMKRfz+mJMKaWUUsqbaTHmJIk//8DNg7szemAXPnvnTavDUUopu2n+UspaWow5QW5uLm89+zDPvD2L6fN+ZPmiuezZuc3qsJRSqkyav5SyntV7U7rVv0cN5dTp0+c9H1OpEq/PnFPh427b+Bs169anZp16APS6fAi/LvuOeo2aVfiYSilVmOYvpXyXXxVjp06fpsmYyec9v2P6eIeOe/zoYeJr1D77fVz1mmz74zeHjqmUUoVp/lLKd+ltSicwxpz3nM48Ukp5A81fSllPizEniKtek2OHD5z9PunIIWKrVbcwIqWUso/mL6Wsp8WYEzRr1ZaDe3ZzeP9esrOz+HHRPC7u3d/qsJRSqkyav5Synl/1jLlKYFAQtz/8HBNvG0Febi6XDR1O/cba/KqU8nyav5Synl8VYzGVKhXb7BpTqZLDx+7Usy+devZ1+DhKKVUczV9K+S6/KsYcmf6tlFJW0vyllO/SnjGllFJKKQv51ciYUkop5zh59BCHdm0m7fhhco/tIjAwAGMM2aePUS0mHIBTqWfICa9KYGAgAGdyoOoFnQkMDqVp++4EBOh4gFKgxZhSSik7ZGWc4fcflpK+53dIP079uEhGtEsgolYwrRp2sOsYp9POsG3vDg6fTGf5zPkcT8vChFcmunFnajVuSXytui7+FEp5JkuLMRGpDLwDtAIM8C9jzK9WxqSUUvby9RyWl5vL7o2/knZgB7ERgVxX8xBd+rWv8KKwlSLDuai5bdulwV2bA5Cbm8eqzX/x06pf+PFgOhIZS/W2l9LkwosIyB9RU8rXWT0y9jrwrTFmmIiEABEWx6OUUuXhkzks7XQyO1d/R0RuCj2aV+fCDq1ITs+hcma4088VGBhAtwsb0u3ChgCkZ2SxaPVyln8wm+NnDDEN21KtaUcSdK9M5cMsK8ZEpBLQE7gJwBiTBWRZFY8jXnl0AqtXLKFybBzT5iy3OhyllBv4Sg4rnL9efm82O1ctpFZ4Lrd0a0CV6IZujyciLISrerXmql6279dvP8Da7Z+y8rtTZIZWoXrTdlRr3Ja4mgluj00pV7FyZKwhcAx4T0TaAOuAfxtj0iyMqUL6DbmGwSNG8/LEu6wORSnlPj6Rw/oNuYbLh13HC/ePIfOPb7i9TyOiI0KtDuus9k1r075pbcbmf//9+p2s/WUlG1LhWEYQ4TGxNOwyiMhKlakUG2dprEpVlJVTWYKA9sAUY0w7IA34T9EXicgYEUkUkcQVX3/ilBOfOnmcZ++6ntPJJ5xyvNYduxAdU8Upx1JKeY0yc1jh/LXwiw+dclJn569qVStxdMP3VAoN4Lo+LTyqECvOpe0b8dDwbky+pRuzbuvIi4NqkLDzM05++xK/vTeR5e/9lzVzZnD88AHSU05bHa5SdrFyZGw/sN8Yszr/+y8pphgzxkwHpgPMWLHLOOPEP8ydRd7BDSyd8xFDR+tollKqQsrMYYXz1+LNh82e4+kOn9RZ+SsvN5etP86
mWeU8br60JV/P8b5FZYOCAqlWJZp/9W979rm0M5kcPn6ab3/7P3YdSeF0bjDZEdWp0aw9dVt2IjjUs4tN5Z8sK8aMMYdFZJ+INDPGbAP6An+6+rynTh7ntyVf8tY/E7hj/pf0HXo9lSrHuvq0SikfY0UOc1b+Sj52iL9Xfs11PRuTEFeJvw8ccUG01ogMD6VRQjx3JMSffe74qTSWbVjN0g/mkG5CCa3VnJZ9hhIRFW1hpEr9j9WzKe8EZuXPQtoFjHb1CX+YO4vBjaFJ9XAGN07T0bEy/Hf8CFJTU857PioqmocmO+e2sVJezK05zBn56+SR/Zz8/VvuG9KWwED/WHS1akwkw3o2Z1jP5uTl5bF1zxHe+vhRTkslGva8ioRmbawOUfk5S4sxY8zvQEd3na/gqvKBa2MAGNkhhhGf6ehYaVJTU2h4y5vnPb/rnTstiEYpz+LOHOaM/PXXqsXE5RxizIALCQyo2Fph3i4gIIAWDWry1u01OZOZxftL5vLdovdp1u966re0b/FapZzNPy6L8hVcVVaNCgZs/x3cGJbO+cih4/73gXFMuH4Q+//eyfV92/Pt7I+LfV1K8gn+XPU9f676nuQk37ktoJRyPUfz1/aVC+kUn8G1vVqcU4iNuO8luox4gG1/HyChz2j+76vFLonfE4WHhjBuUEdm3dWLhoe+ZemUhzhx9KDVYSk/ZPVtSrfauOYnfjqUwSd/7D/n+crHfnLoVuVDL04p9vnTJ5M4suM3Mo7to0pEMMc3rWF4x7qICAu+/56j2XDodDZhNZtS/6LLqF6nQYVjUEr5tormL4Nh20/z6ZEgtG9c87yff/Ly/U6P1duEhgRz84C2DO6cwmMfv8jxTtfQpF1Xq8NSfsSvirHHpnzhlvMkHz/C7tXfUT/aMKRFLep0aW1bvbpD2NnXdGpR7+zXf+0/xtxV77J4bgYtBt6iK00rpc5T0fy1a/1PdK9taN+4hpMj8j3VqkTz1m19effb70n8egsdr7zZ6pCUn/CrYszVMs+ks3PVIhJCz3Bnv0ZEhoXY9b7GCfHcNyye9Iwspi38mCULUmk37C7iatUr+81KKVWCo7u3UDltN+07trA6FK8RGBjArVd0oPqv2/lm9jQ6/XNs2W9SykFeX4wZY8AYqODGtc5y4uhBDq2dz7VdG1I7ru45PzMGbHsIly4iLIQJ/+zE+JxcHvnwLXZXbk7b/iMJDrFuXZyoqOhim/WjdEq4Ug5zZf46dfwoObt/5fpLWzt8eHtzmC+5sktTRLaz+PsvuPDSq60OR/k4ry/GMnIhzOSQR5BlBdm+TauIPLmNOy5vTWjwuX+kxkCmCSQw5/zlIUoSHBTIC6N78dPGPbw++X4uHDKWmo1aOjtsu+jyFUq5jqvyV8aZNPatnMu4Aa2cUoiVN4f5ikGdm7Dyg5/Yt7URdS5ob3U4yod5fTF2KjsA0jIICwRxczGWl5fLga3rqRuYRLdOTTmTDWeyc4q8yhCYk0Jkbvm3LunRuh5dW9Thuc8/IXFDAh2GjnX7Z1RKuY6r8tfOXxcxpFMDsvKErPSiOam8Kp7DvJ2I8NyNPRj+0rvE121GWESk1SEpH+X1xZhBSM4OhGz3njc7M5MV7zzG6O616d+hEWTuL/tNFRAYGMCjI7qw9PfdTJ/2OBePmECU7oOplE9wRf7ambiMBid/p3F4W8hMdt6B/ZSI8NjVbXlx7jS6j7zH6nCUj/L6YswqKz58gZdHtCahmnsKo75tG3BhvTgmvPcEba6bSOWq1dxyXlU63aFAeZLTJ5NIWjuP5+/sZ3UoPqVlg5q0WPs3h/fuokbdhlaHo3yQFmMVsO6bdxnWKtJthViB+CrRTL2tF498+ByHWwzggi4D3Hp+dT7doUB5kg1fz+Cl6zpbHYZPGndFW26Z8jrV//2atosop/OrFfidIXHOdAbWOMWw7hdYcv6oiFBeG9uPhslrWT33/8jNcbQfRCnlC/ZsWk3b2Eyqx1ayOhSfVCkynBt71GfjsrlWh6J8kBZj5bBv6+80yNvDlV2aWh0Kdwxuz60ts/jxncfJyXFzw5xSyqPk5eWxYhOB5AAAIABJREFUbcnH3DlYZ/y50sBOTTix8QfycnOtDkX5GC3G7JSemsK+JTN48JouVodyVufmdXlmaFNWzHiM7KxMq8NRSllkR+Iyru5Um+CgQKtD8Wkiwug+TdmwdLbVoSgfo8WYnVZ+8Cwvje7pcb0CjWrH8d+rW9gKskwtyJTyR4fWLmJYj+ZWh+EX+rZrgOz+mVPHj1kdivIh2sBvh9+XfMENF9cgrnKU1aEUq36NWCaNvJB7ZzxC91ueIjQs3OqQ/IbuUKCsdmTfbppXCyEgQK+t3UFEeGJkF+769C0uufUJq8NRPkKLsTKcPHaYgL9/4YoBl1odSqlqx1fm1evb8cC0+2lw2b+o01x7R9xBl69QVvt97tu8P9Zz2if8QXyVaBpFZnDi6EFiq9WyOhzlA/RSqhTGGNZ9+jJPXtfV425PFqdmXAwz7xnA7sX/x5k0/9u6RCl/k5ZyinrRtlnWyr3u/UcHVn/2um1/UaUcZPnImIgEAonAAWPMIKvjKWzrqu8Z3LoqsZW8ZwsMEeG1W3ox/t3HuPimJ4mI1mnuqnx0IdvysTKHbVvzAzd0ruvOU6p8VSpFMKxNZdb88h0tu+uaj8oxlhdjwL+BLYBHVQ1n0lI4kTiX6+6+3OpQyq1qTCRv39yVO955jE43PUFkpcpWh6S8iC5kW26W5bDTm3+ka39dbd8qI/q0Zunr80lr04XI6Birw1FezNJiTEQSgCuAZwGP2vRrw6KPePiqjl5xe7I4VSpFMGVMd26f8QQdbniU6MpVrQ7JLXRUR7mTlTksPeU0tSsHu/OUqggRYdLNvbh5+tP0v+slr/19oaxndc/Ya8ADQF5JLxCRMSKSKCKJK752zy/T9NQU5MifNK3r3fs/xkSFM3VMD355/xm/6WsoGNUp+iiuQFPKCUrNYYXz18IvPnTqif9cuYhru+o+iVarHB3BhP6NSJw9xepQlBezbGRMRAYBR40x60Skd0mvM8ZMB6YDzFixyy0VxW/f/B+PDevojlO5XHRkGA8Oas4b7z9Hz5se9vkrt1PHkzjw945in1fW8NXRSntyWOH8tXjzYbPneLrTzp/y11raDyz2tMrNureqyy9b1/DXuuU07tDb6nCUF7LyNmU34EoRGQiEAZVE5CNjzPUWxkRayinCTu2iYe3+VobhVJ0uqM2VR5L5ZcV8WvUabHU4LpVn8giNO7+hOc8UP/jqq4WCJ/HhHjTLcpgxhurheT5/ceVNHhzWiRe+/IEdOdk06ax9fKp8LCvGjDEPAQ8B5F9V3md1IQawfu50nr26k9VhON01vVryxwfLObSnBTXrNbI6HI/hw4VChelCtvaxModtX/sDvVvVdsepVDk8OKwzr875hbULj9Gh/7UEBOr2VMo+njCb0mOcPplElaxD1KnewupQXGLi8K7cNuV1oq5/gujKsVaHozyUjgh6vmM7fqfzIF1s1BNNGHoRy3
77i8mv30OHaycQX7u+1SEpL+ARxZgxZjmw3OIw2LTkM54Y0s7qMFwmPDSEp4ZfxIOfvUqfW57wyas2MXkcfP/uYp9XylXcncMiclOoUdWjVgNShfRp15jurRvw6Edv80dGNB3+MYbKcdWtDkt5MI8oxjxBdmYmOYe2ULe6by/eV69GLLd3r8YH8z/k4iE3WR2O01WOr6G3HZVPy8rMgIxTVoehyhAcFMjzN/XiyInTvDn/VdacFOp2uJT6F3YmLMIz9zlW1tFiLN+G7z9n/MDWftEQ27NNQ+au/p4Th/cTWyPB6nCcSvudPI/+nThX5pk06sXrL/OyJCWnMvb5j5j+0A1UjbFuF5XqsZV4ZlQPjDEsWL2Bb2Z+Q0pIPM36XE2tBk0ti0t5FvGm9adcubTFT5PvZubd/jMDJis7hxtf+44et79KcKj/7mtXdDZl8rHDGAkgQAKIqRp39nmdXWmN5jUr0b1JnE9cITlraYt182dyZ5ssWjao6YSoXMMTCqFXZi1m/pIfGdSvF/dcd5klMZQk7Uwmr81LZOuxbKo0707z7gMJCtIFfH1NUKAwulsDu/KXjowB29etoPcFcWW/0IeEBAfx5IjOPPvpJLrf+LDV4VimaIE18aZBeptTebTU4wepVdWzZ0TPXLCSk4f38cH8XywphJKSU5n/41qm/DOOcfPXcuOgbpaOjhUVGR7KxOHdMMaw9LfdTJ98P60G30rNRi2tDk1ZxOoV+D3Cvl/mMLp/W6vDcLumCXH0qRvAxqVfWh2KUspOlUwaVSpFWB1GiQoXQvN/XMvxU2luj2HmgpUMahxAs2qhDGocwAfzf3F7DPYQES5t35CZd/bGrPuYxNlT/Wa3FHUuvy/Gkg7to3WtCL/oFSvOTf1aE3pgDUf27bQ6FI9xeN8uDvy945zHyaSj/Hf8CKtDU34uLzfX1sDvwawuhAqKwVHtbSNho9pHWlYU2issNJjHR3ZlZHPDL7Ne1oLMD/l9Mfbb3GmMG9jG6jAs9d9R3dk7/w2Sjx+1OhSPkJubS2hc3XMewVGxur+lstzJY4dpU8dzbrcV5QmFUEExGBdl68KJiwry6NGxwnq1rsvYTtGs+eL8Vgnl2/y6Zywz4ww1QtKJreS5yc0dgoICeXJEZ+6eNYletzzlUw39ut2R8iXHdm+mVfUYq8MoUWmFkLt6x5av387Bo5l8vPHci8taR7Z7XCN/cbq2qMPKrWvYu2kNdVv53m4wqnh+XYyt/+ZdHhjQyuowKsyZM5ZqxsUwaURr7p3xCD1ueYrQsHCHjucpRVB5tzuKiopm36ePEBx17g4FgWERQJYrQlTKbsd3beSykY2tDqNEnlAIfT1pvFvO40p3X9mB4a/Ook7Li/y2hcbf+G0xZowh+8hfXFDvUqtDqTBnz1hKqFaF165vx4QZj9D9lqcJDa94k7C37vn40ORPdEal8ljZyYcICvTctal8oRDyBEFBgVx9UW3WrvmBZp37Wh2OcgO/7RnbkbicoR29d6NdV81YqhkXwxs3XcSaGQ+wZ/NapxxTKeW4M2kpxEUIAQF+m7ZdJik5lav+M9Wjmvyv7tmcvYmLrQ5DuYnfjoztX7eUfqO9dx/Kc2csZTi1J6NalWg+mDCAl2fPZ+WGn2j/jzF+tX2HrhivPNGRfbvp1jTe6jB8ktXrohUnICCAahGCMUZvVfoBvyzGUpKP0yAqk+jIMKtDqZCCUbHPr7EVB6PaR3LN585d2FBEuP+qTmzafZjn33mY6hcNomnnS/3iqlwb+5Un2rtmEQ9e69mLvXojT14gtl1CJHt2bqVO4+ZWh6JczPd/sxZj/dfvcks/723cd+fU7VYNajDz7n70CNzI4jfv59Df251+DlcqGOUq+tBRLuVtck8foVKkYxNrvIU7bxtavS5aaapGh5GXm2N1GMoN/G5kLCcnm7D0QzSs3cLqUCrM3TOWAgIC+EfXC7isfUNenfMey74NoHqrrjS7uD+BQcX/L+Qpt/p0lEv5gmMH99Kqhn8UYuC+24buuMuglD38rhj7c8V8RnZvYHUYDrFqxlJEWAgTR3TnTGYWP2/ayQdvTyC6WXda9/kHwSHnrk2mRZBSzrN79Xfc18U/blG687ahJ6yLphRYWIyJSB1gJlADyAOmG2Ned/V5j235ld6X9nD1aXxaeGgI/To04tL2DVm+4W++ev9BTodWp9XA0VSJr+G083jKWmVKFcedOSz9wJ808+JleMrDlZOTiip6lyEvz3D8VBotGm3RYky5lZUjYznAvcaY9SISDawTkSXGmD9ddcLTJ5OoF6NTw51FROjTtgF92jbg2MkUXvzyRTZkhVGrQ3+aduzl8PG9da0y5TfcksOO7t9NmwT/mM3s7tuGRe8yvDJrMfOX/Ei/Ttowr9zLsqrEGHPIGLM+/+sUYAvg0oW/Nn77EXcObO3KU/it+CrRvHRrX2be1oX2Z1ax9PV/89vS2aTrfo7KR7krh21bMouxA/xj/1wr95V01dqNStnDI4aIRKQ+0A5YXczPxohIoogkrvjasVtTuUl/k1CtikPHUKULDAxgRJ9WfHJPP25MOMTmmRNZ+dkbpCSfsDo0pVympBxWOH8t/OLDch83Iz2N4PQjxET5R/P+8vXb+XhjJh3fOnr28fHGTJavd/0sbk+cVWmM1REodynxNqWItAZmYLvSWwQ8aIw5mf+zNcYYp+xgKiJRwFfA3caY00V/boyZDkwHmLFiV4X/19y/cxudG1aucJyqfESEzi3r07llff4+dJxJXzxFcngCbQfeRKXYuAodc/M795KbkU526gkm3jTo7PPaQ6aK4wk5rHD+Wrz5sNlzPL1cx06cO53HrurgjDC9glWTkzx1VuUv24+RMLSOZedX7lNaz9gU4AlgFXAL8LOIXGmM2QkEO+PkIhKMLYnNMsbMdsYxS7J3zULGDGjiylOoEtSvWZU3x/Vj/9GTvP7N8yTmVKbH9fcTHBpa9psLyc1Ip9ZNr5GZtJfa9f/3d1nRHjKdIODzvDqH5eXmEpC8j0YJnrsXpa/w1FmVOQEhRFbSQQR/UFoxFmWM+Tb/65dFZB3wrYjcADg8eCq2/R3+D9hijHnF0eOVJff439SO16RmpYRqVXjp5j4cOJbMA1PuIeaC7rQbMKLE1xddqyw79QSZSXsJDAx0Sjw6QcDneXUO2/zTfK7tWs/Zh1XFcPfajfYwxnDwVBYtLTm7crfSijERkRhjzCkAY8wyEbkK21VgrBPO3Q24AdgoIr/nP/ewMWahE459jlPHj1Ev1j96LrxB7fjKzLpvIN+t28V7k++n3TUTiK1W67zXFR2dmnjToHNGxDyFjrB5LK/NYTnZWRxdv5j+91/u6KGUHay6PVoaYwzhMRVr6VDep7Ri7AWgObYhfgCMMX+ISF/gUUdPbIz5GXDL7qfbflnAPT0975e4N0pKTmXs8x8x/aEbSuylsOc1AP07NKRz0xo8/enL7KjSgouu/JdXLjuiI2wey2tz2OYf5zFuQAvdINqP/bRpL5Vq6+8tf1Hibz5jzMfGmFXFPL/XG
HOra8Nyrrzjf9O8vvMWI/VnhbcpceQ1BSpHRzDp1r7c3CKH799+mJPHDjszXOXHvDWHZWdmcmLTcnq21luU/mzX4VPUaNrO6jCUm/j8dkh5eXkEZeiyCs5gzzYlFd3KpEuLOsyoU5V73nueZv+8j9gaCee9xlP2u1TKlTYtm82/r2ito2J+7s/9ydTuqM37/sLni7ED2zZwUeNqVofhE+zZpsSRrUwqR0cwdVwf7pj6Mg2uvJv42vXP+bmz+6+0uFOe6MSORDoPusTqMLySvS0S3iBDwnQmpR/x/WJsy1pu7qnD/Y6yZx0eZ6zVExIcxNvjLuHOaa+RN/BOqtdx3ebI5SnuSmvSV8pZdvz+K5deoL+AK6pwi4Q37y1pjCE1PcPqMJQbldktLSJNRWSpiGzK//5CEXnE9aE5h5zcS70azpg45f2SklO56j9TK7TNhz3blDhrK5PgoEAmj72Eg4smc2TPtnLH6goFTfpFH6mpKWdH2Io+tFDzDN6Uw/5e/in/6t/W6jC8ki9tZ5SccgYqn9+qoXyXPSNjM4D7gWlwdjbSx8AzrgzMGXJysjFZ5Vvx2pc5ctVozzo8zlyrJygokDfHXsKISZPpfcfL5V4g1p10+QqP5xU5bNemdVxxYXWvnFHsCRxpkfBEQcGem/OU89lTjEUYY9YUaSbNcVE8TpWZnsYFNaOsDsMjFG2sH9SjLQ+9PbvM3oqCHoz3Hhtd5q1GZ67VU3DeJ269kuc+eI5Lxjx5zs91bS9VDl6Rw/b8+g33jWxhdRheyVO3M6qo9Mws0AkcfsWeYixJRBqRv2K1iAwDDrk0Kic5fmAX9aP06gLOv2p8cPIXnDp6sMyrR6t6MArOu2TVJq5oWoP1K7+jedf+Z39enrW9tHDzex6fw44d2E2jiDSvLBw8gaduZ1RRX/6ygwad/2V1GMqN7CnG7sC20e0FInIA2A1c59KonOTI1nX8u19jq8OwXNGrxiuahfH2zzuZNzqBOxeVfPVY0WUqnBVvwXk/e+EufvjgG9JbdyEiulK5j6eLsvo9j89hW7/7kDdHdrY6DK/lidsZOSLXGMIitDD3J6UWYyISAHQ0xlwqIpFAgDHm/CEGD5V2dDfREZ2sDsPtik7vLnrVuGBLKiNbBVE1JKvUq0erejAKzls1IpDkk8lMnf0jjw/vyoOfv0Hfm63pu9ZlMLyTN+SwjPQ0IrKTiYnSLdsqyhO3M1KqPEotxowxeSIyHvjcGON1U1NiI4IIDQm2Ogy3K3prsfBVY05uHqdTUvlwaBgp6RmMah9bbG+FVT0Yhc87M/EUMSF5fPztSm4f1oeu1bPYs3kd9Vp2cNn5S6K3M72TN+SwtXOmMXFwa6vDUEpZyJ5pO0tE5D4RqSMisQUPl0fmoJycbM6ke2Tudanipnd/PWk8iR8+QeKHTzDqiu6M7xFPtxYJNKlT7WxvxdtfLjtn2QtnLVNRXgXnBZi/+TRTB0cSajJ5+8tl3DH4InYs/dil51c+yWNzWG5ODkGnD9Csri5MrZQ/s6dnrKCL8I5CzxmgofPDcZ7kY4e5qL7/LZ5Y1q3Fknorcsx6YoMyix1NK8zVPRgF5528MpkhjSHX5NGnfgBf/bCOR28exCXNYvhrwyqHbhtufudecjPSyU49wcSbBp3zXh0B80kem8O2rFzEtV3qWh2GUspiZRZjxpgG7gjE2dJOnaRWSKDVYbiVPbcWi+utSEpO5ZoHXmfKoP816tvbg+Hs7Ue+njT+bDwTB0QTFxXExPgcNnyewvFTadx6eTuGvzmvXEVT0cItI+koNYY/Q2BgIDXq/O/3cWkN/Toj03t5ag7LycnmUOK3XHb/5VaHopSyWJnFmIiMKu55Y8xM54fjPPsSFzNxRHOrw3Crik7vdqRR3xVLX5T1OZrF5nH8yEGqVq9l1/GKFksTbxpE7fpNyhWTzsj0Xp6awzYvn8vtlzXXDcFVsYwxVoeg3MienrGLCj16AE8AV7owJqfIOJNGcKBvj4wV3d5o+frtfLwxk45vHT37+HhjJsvXby/1GPN/XMuo9rZRrVHtI+3eSsRV24+U9TnuGtSO3+dNc8q5lF/wuByWk5NN0h/L6d3WIwftyuTI1mqqbN0uqMWuNUusDkO5kT23Kc+59BeRGOBDZ5xcRAYArwOBwDvGmOedcVyAKgHpBAX5djFWdFSqItO7HVks0VVLX5T1OeKrRNMsOoOkg3uIq6WbwKvSeWIOWzd/JncO9N7V9j1pQ+6SWiWc3ULhTk1rx5K18YTVYSg3qsgmaOlA+e7xFENEAoG3gMuBFsAIEXFKdkpPOU14YJ4zDuWxnDUqVZHRtMLnr8iImjPcPaQjf8z/P7ecS/kcS3PYkX07iTu9hW6t6jsagiWcPSLu6Chb4cLQnue9QUhwIKlJB60OQ7mRPT1j35C/jQi24q0F8IUTzt0J+MsYsyv/PJ8CQ4A/HT3wzvU/MqR9HUcP49GcNSpV0cUSrd5+pEqlCAY2Dee3X7+jeZf+Zb+hEF3A1b94Ug7Lzsrk9y9e5dN7vG9V+ALOHhF3ZJStpF1CrNo9xFkiw0OpGqY9Y/7EnqUtXi70dQ6wxxiz3wnnrg3sK/T9fuC8/UBEZAwwBuD6e5+h55Ujyjzw4Y0/0ffuXk4I0TN5wqa4nrD9yHWXtGLBy9/QqH0vQkLD7H5feWY/piSf4NOX7ic8LEwLOO9lWQ4rnL/ufPR5jmxL5OVRnb22hcLZucfRoqmkwtCq3UOcKSogg5Tk40RXrmp1KMoN7CnGBhpjHiz8hIi8UPS5CihuCtF5lwLGmOnY9pVjxopdZV4qpKWcok6Ub19RWD0qBZ6x/UhAQACvjO7Kfe8/TZ+xz7rkHGsXfUbQkY107TuaS0aMc8k5lMtZlsMK56+Hn3zGXNE9gfo1PGK92Qpxdu5xpGgqqTAc1KNtic8/9PZsr+khu6pLY9799TvaXz7S6lCUG9jTM9avmOecsTDOfqDwvcQEwOGb5L8vnMntA1o6ehiPVtE+L19UKz6G4e3jWDtnhtOPnZJ8gm0r5jBpaG22rZhD6qmTTj+HcguPyGF3XVKf/h0sX2fWIc7MPY72nZZUGD44+YsSn/emHrKOTWuRvHO91WEoNylxZExExgG3Aw1F5I9CP4oGnPF/81qgiYg0AA4AwwGHLgH27dhEQw7SKKG7E8LzXO4clXJkRpK7ZjNd2aUpgav/4sN3nqLL8AlEOOnW4dpFnzG4CTSuFs7gJmmsWfipjo55EU/LYTXiq8AJ7yjoS/q368zc4+goW0mtEsdO7WV9Xi4fbcggIMA2eJmXZziWfJzvx9b2mh4yEaFpXDAnjx2mSnwNq8NRLlbabcqPgUXAf4H/FHo+xRjj8JxbY0xO/ga+32GbFv6uMWZzRY93+kQSuxfNYNrtvR0NTRXiSHOtO6e/X9G5Me0anuLh9x+hepd/0qh9T4cW0ywYFXv82hgARrSPYeRnc+g0cDhRMVWcFbZyLa/KYZ7EHf92He07LakwfGXWYmZ/u4x0gvlu8gSqxkTyyqzFcGCd1/WQDb24IVNW
fUfHwTdaHYpysRJvUxpjThlj/jbGjDDG7AHOYOuHiBIRp2ymZoxZaIxpaoxpZIypcNNPctIRNsx6mqnjehERFuKM0BSOTWF31YKwpakVH8P7d19G05M/89OU+1k9911yc3IqdKyCUbGqkcGA7b+Dm8CahZ86M2TlQt6UwzyJu/7tfj1pPIkfPnHew5HRt4LYe9QLQc6c4O2vllm+DI8j2jauRfquNWSeSbc6FOViZfaMichgEdkB7AZ+BP7GdrXpEbb+PJ+ds19g2u29iQwPtTocpyq8/o4VK16f21wbUK5eC0fe66ibB7Tlw7v6MKZlFqum3sPPHzzHzt9/Ldcxdvz2C5/9kUGPt/affXz2RwY7fvOOfhP1P56ewzyNlf92HTVzwUp614HVe9J5fWA4Xy3+lSlfLS/xdqg9rN5t4OXR3Vnx/nNnv//v+BFMvGnQeY//ji97pQHlueyZTfkMcDHwvTGmnYj0ASz/Wz9+5CAbZr/JkAtjGX57X6vDcYmiixa6c8VrR6awu3LpjU7j3iIpJfO85+OiQ1kz5Y5zX9u8Dp2a1+Hk6XQWr1/B3Nc/Ib5df1r1GFjmLcyxL37kUJzKo3hkDvNEnrBsTkVt23OEaV8uYWTbSAY1CaJzQjB966Tz+ZJEQoOp8O1Qq3cbqB5bibbxhv3bNpDQrI3uk+uj7CnGso0xx0UkQEQCjDHLROQFl0dWgswz6fz84Qs0iTzDG9e3J76Kb67zVPhWwZh5a8gzhnfcuIChM7ZJcsXSG0kpmbS8ddJ5z2+ecW+J76lSKYJre7fiml4t+ebXrXz25mIqt+jJhZcMJSCgIptQKC/jUTnMk3nCsjkV9Z+3vqRvXcPSbSnMvjaK4ADhX+1CWLovm+8mP2zJOmjO8tC1XRg5aQZxdfR/W19lTzGWLCJRwE/ALBE5im3hRLdbNXs6eQf/4PVR3ageW8mKENym8K2CXrVPsvFILs2qVXVb86kjzbWF35uTm8ehE2nUjI2krhsXhC2OiHBl1wu4susFLEncwbvvPMFF1/xbF1X0fR6TwzydJyzmXBFJyakkbt5NIDlc2TSIo2m5HE3PRQT61cu1ZB00ZxIR3ri5O3fMmIgxvr2Opr+ypxgbgq3x9W7gOiAGeMqVQZXkgYuDaVbXGcsDebbCtwqyc/IY2CCXH3dmcjwt1223DRxpoi383ldmLWb+kh8Z1K+705JYVtppts59kwuG3kVIRMVGRvt1bEKzhBM8OespGv1jAvG16zslNuWRPCaHeTpPWMy5ImYuWMmEXlVZvv0U3+7M4ts9OVSKLNiVI4haOeUvJj3tlm18lWjuvvwCFi37FWOMQ7PFlecp8x6NMSYN28KGvY0xHwDvAFmuDqw4zepWs+K0blf4VsHxU6nUrxLIkGbBfLA2udzNp1Zy1aysI+uXEJ++kyPrFjt0nLo1Ypl6R1/2fPMaSYf2OiU25Xk8KYcp5yvIM1c0CyM4UJh/Sx0S4qL5bvL9Zc7QLK05v7hbtr3rQL/xr1rWzN+1RR2iQoM4+tPHOkLmY+yZTXkr8CUwLf+p2sBcVwbl7wqvct1pyjE6TEth0q8ZvPRjcrlXvHblTKCyju2KWVm5ubmkb13Bs1dUJ33rCrLSUxw6XnBQIC/c2I31X7xBXm6uw/Epz6M5zLcV5JkFW1I5mZbF/D9T7c43RSdJFVbcbgPvrztNQMZJSy+G61SLIfOPRWx+cRh/TRvHrnfuZNc7d+o+uV7OntuUdwCdgNUAxpgdIuIfQ1QWcfYq166aCVTasV01xB+Ue4Z+NVKIyw6lX3wm700aR25QOAEBQqdxb50zo9LeHQAqR0fwrx51mL/kU9oOuK7CsSmPpTnMhy1fv529h85wOiWVNy8P5c5FJ6kUHVVmj2pZzflF83BScirXPPA6UwZFWNrMX5DjUtMzGTdtOS2G3U9czTplvEt5OnumkmUaY84O6YtIEMVs6K08T1JyKvN+WMN/ugYyb9masyNYScmpDL73LQbdO7nCI2Zl3YIsbVaWI5+nXuVArm0VRqWa9RnVqxEJNarS+9+vcOmD089b8qK0q96iLr+oMYEH1pOWcqrC8SmPpTnMh309aTyjrujO+B7xXNG+DuN7xDPqiu5lXtSWd+Tente7c02yqIhQpo3rw6nvX2fT8rnk5eW5/JzKdewpxn4UkYeBcBHpB3wBfOPasJQzzFywkl61s6gXnUuvWllnk8fMBStJOriHnbv2VLg4KisxuWIz84JzVgm3/W8bGxnEkCZSbO9YRfrV7hnSljVfvl3h+JTH0hzmQZxdsFRkhf3yvsfe15fnAtAZIsJCeH50H3qH72TZtEfIysxwy3mV89lTjP3RfWCaAAAgAElEQVQHOAZsBMYCC4FHXBmUKpm9iaxgVGxgg1zqVQliYINc5i1bw/a9R5n3wxoe6xlE1XDDnKWry50U7UlMrtjqpKDAu+zDFPpOP0zf6Yf5bFMmJ3dvPO+1FelXq18jlsjMY+TkZFc4RuWRNId5EGcXLBUZhS/ve+x5vRVbwBUY2u0CnrvqAn6dci9bVy9123mV85RYjBXs3WaMyTPGzDDGXG2MGZb/tQ7xW8TeRFYwKla/SiBhQQHUrxJIr1pZPDj5C3rVzqJdzSD+eUEwccEZZ49lb6HniluQ9igo8MKrJtBm/JSzjwtHPXHO6xzZi+76Hg3ZtEx7u32B5jDP44qCpSKj8KW9p7g8aM85rN5GqmGtqnwwYQBt09ewdOojZKR7/t6b6n9Ka+CfC7QHEJGvjDFXuSckVZLyrAa9ZM0WNm5P4f1EQ+WwAI6k5hIgQkBgGg9eHUbViGBGtQlh3rY05ixdzY2Dutnd7O/pC0M6sop4rzYNmPLDMuh3tTtCVa6lOczDVHQR1dIm41RktL2097wya/F5ebCsc3jKmmSBgQHc0O9CBnRM4f537ie6zeW07jlI1yTzAqUVY4X/9hq6OhBVtvIksn6dmhOWcZRTaRnERIQQF5HFycwALm0YRP0qgQQHCHGRwpBmwfx0IIO3vljG8jUb7Cr0rF4YMi46tNjtj+KibRvFO1osxkcIuTk5BAbZM9lYeTDNYR7EkYLFXftDVnT7I0/bRiq+SjTvTRjIvJVb+PDN5bS6YgwJjZq5PQ5lv9J+25gSvlYWsCeRFb56/G71FnYcPMMzfUJ4+Pt0Jl8Rwe0LzjDztyw+/uPcu9N5BLA7dR2j2oRYvu2HPYpuCF6Uo8XipS3j+fG3X2l+UQ+HjqMspznMg1S0YHHn/pAVHblbvn47+w9n8PqK41SNiSQgwHYdYOXdAhHhH91a0K99I16d8z4/r65G12vvIiAw0JJ4VOlKa+BvIyKnRSQFuDD/69MikiIip90VoLKxp0+r8NVj/87NGd8jnsNZkYxqG0rH+jHc3bs6D954BQcWvXrOY+MnTxITFkj3mhnk5OaVq8fKHsX1YLhzCnh5Xd65GUkbl1kdhnKc5jAPUtEZ1q7uxSrIRdv3Hq1wr+nXk8Zz/cBuNIoL5fqB3ZwyYclZIsNDeWRkdx7oGcMPb/ybY/t3Wx2SKka
JxZgxJtAYU8kYE22MCcr/uuB7h3bpFpGXRGSriPwhInNEpLIjx/MHZSWywlePs5euZt6yNVzRLIz5m08zrlMYp1LSGNk2gnk/rDlvfbGCZv+YoBySklPL1ZBvT1FV3KQDd08BL4+Q4CBIO2F1GMpBmsM8S0VmWBc3Gae4HOaIglz04OQvKjwxycqZlPZqWb86H024jNPLp7D263fJzsos+03KbexZ2sIVlgCtjDEXAtuBhyyKw2uUlcgKXz3GB2fQq1YWC7akMqhJEDWjAokJE8g+Q6/aWeetL7ZkzRbeS0zhmi8zuOitI7R/84jda4KVVVQVl6S8IXF1aVKFfdvPXzJDqXyaw9yguDsCxeWwiiqcixI37+ajDRkVWhvR6pmU9goOCuT5m3oxqmkGK6ZN5PTJJKtDUvks6VA2xhRepXMVMMyKOHxF0X6yjKwc3ktMJydPCBDDpF9tCwEGBqRhTB7N4kOY/+P/ei/6dWpOv9rp3NMzhldWnILaHeye4VRWL0dxPRhAhfoy3Klni9q8tuE36jRtbXUoygNpDnOPopNx8vIMx06mnJfDKqpwfprQq6rdua8wT5lJWR7dW9Wjae1Ynvz4aWJ73EDdFh2tDsnvWTUyVti/gEUl/VBExohIoogkTp/nmVcbVkpKTuWyO1+jT13OXj0uGluPCb2rcf8NA87pDbt7ZH8m9K7G9+Pqnb16c2RNrpkLVnJ5QwjMOs3lDeW8q8Hijl1wC7Ui53Onlg1rkpu0y+owlHcoMYedk78+/9bNYbmXK/pAi94RuH5gt/NymCPxVjT3FeaOdRdd8WdbrUo0k2/vR+if81jz5WR06T1ruawYE5HvRWRTMY8hhV4zEcgBZpV0HGPMdGNMR2NMxzFDurkqXK81c8FKAjJO8l7i6VKH10tKPFNmL69QIik43sBGkJudxcBG5rxEVlySKriF6u4FYysiID1J93vzY87IYefkr2sGuCt0S7i6D9RZxVMBZxVRrtj6rbhYXfFnKyI8MvxibmwVyE8fvkhuTo5Tj6/s57LblMaYS0v7uYjcCAwC+upq2BVTkJxm3ZDAuPnpfPHS3cUOixeMng1tJuclnpk/rCdI8sq9JlfBqFhQbgb1qgSxJzmDyxuGn3PLsbj1vg4kZbDuACzc7ZkLxhZ2TfcmLFz5LS27D7Q6FGUBzWH2K9qyMKhHWx56e3axi7RWlLPX8irveoQlLTzr6hmT7ljao3uruoSHHOSlN++mww2PUjmuulOPr8pmSc+YiAwAHgR6GWPSrYjBF9i7Js7/Rs+CmLft3H0X61aPKzaZFAyLl5RMl6/fztbdKcxYmUWlMOF0hoGgbC44tt2uVatLW1HbU/RoVZeZ/7cStBhTRWgOO1fRXPTg5C84dfSgU/tBnb3zR3mLKHctPFvced3RY9uhaS3euyuOcVOeo/k1DxJbrZbTz6FKZtUS45OBUGBJ/jYNq4wxt1kUi9dJSk7lpqffJ+VUMl8Nt83QL6lp1N7Rs6LKSjzvPnoT1zzwOp9fE01cVBBJqTlc83kK7z022q7PYFViK4/w0BBiA9LIPJNOaHiE1eEoz6I5LF/RBvYrmoXx9s87mTc6gTsXOW8kx8o1u9y58Gxx53XX5IDw0BCmjuvDHdNeoMnQ+6has47Tz6GKZ0kDvzGmsTGmjjGmbf7DL5NYRc1csJKdu/bY1Xs1c8FKeteBhxccpU9dnLZmjiP9Ft6wtEWBq7s3YdNy3ThcnUtz2P8UzQULtqQyslUQVUOyPLYftCQlNcpbtXSFOyYHFBUWGsyUcZewc+4kjh3422XnUefyhNmUqhwKCpnalQJ4LzGF9m8eKbNpX/KyOZmWBbnZdhU/9iQeR5pWvWVNHoDuLeuStitRZxopVYLCuaDtG4eZ/MtJ+jcOIiU9w2NnS5ekuEZ5Z08cKA93TA4oTkhwEG+Pu4R1n00iJye77Dcoh+lOyBYrb+9UQSFzT896560JVnCs46fSqBoTeXZUbNn2NKYMimTcgjT6NKtc6q1Be4fFK3rLwNvW5BER+jSLZfuGlTRqq7N5lSqqcC54ZdZiOLCObi1izj5XWpO9J/WOlnQr0spNwK28NRscFMiL13Vk4vRH6XnLkwSHhFoWiz/QkTGLlWfKcllXaEWPtXz9dt5fd5o21SHX5NGmOryXeLrUqypXD4tbMezuqJsua8OuH7/U0TGlylDekRxP2hatpBH70j6TJ++x6wyNasfxyKDG/PrFFKtD8Xk6Mmah0ppCi7tiLK2QGXVF1/OOVdBkP3GArcl+YnwOG8posnf2jCV3H98VAgICGHxhPL+tXkrzi0td7UApv1aekRwrmuJLGokrbcS+tM/0yqzFHj8RyVGtGtak/+4/+G3lt1zQ1bfXyrOSFmMWKm3KcnGzDUsrZOD8LYYKnivP8LqzhsWtWpPHVa6/9EJ+eG0uZ1p3Jjwy2upwlPJ67lqyoeg5iyueKnIr0qoZllYY2acVy95cQGrLzkTFVLE6HJ+kxZhFSrsSM8YU+4+8pEImKTn17DIThY8VEhZF0klrRqG8YemK8hAR/vPP9jz95Vv0HPUg+csZKKUqwIre0dKKp4qM2FtRTFolICCAp0d24f5PJ9F77DNWh+OTtBizSFm9U+X5R17Ssajd3OHkUJEGW1+9YmxWrzpXNz/J4mVzaXvJUKvDUcprWdEUX1rxVN4Re2+biOQMteJjGNauKsuXfkmrvsOsDsfnaAO/RUpqCv1u9ZZyT6N2xfTngsbUKV8tL3eDrTctXVFeV3ZpRtzR1RzYvtHqUJTyWu5essFT97X0NkO7NuX4xmVkZ2ZaHYrPEa+aIbbyTS8KtmIKpobf0/N/U8OLLmHhrji+Wbycw+kw94Z4u1fvL3zLtPDK/Pau/O8NzmRmcetby2k/6jGiK1e1Ohyf1rxmJbo3ifONe8JbFxhO7LI6Cr/k7Lx65b2TOXg06bzna1Urfns5X7J++z6mbg7josE3WR2KxwsKFEZ3a2BX/tLblB7GE2YbFlxFPt03gvFfnyQu0v5bCFauyeMu4aEhTL2tF7dPe5oLR04kpmq81SEppUphb161ty3D1wuu0rRvWoe0hUutDsPnaDHmYdzxj7yshDNzwUoubwjxIZlc1zqYD9YmM6pjzDkTDEp6vycUk+4QFRHK1HG9GDf1GVpe+xBV4mtYHZJSfqusnGZvXvW1iUeu0rx6GCcO7ye2RoLVofgM7RnzQ6UttFgwKjawEcSECaPbhjJ/82lE5OwIV2nv/3rSeBI/fOK8hy9eSUaEhTB1XG+2fP5fjh8+YHU4SvktZywe60175lrt2u5N2L5yodVh+BQtxvxMWQmn4DZjCNmcOJPH0fRc2lSHi97cf84EA01YNuGhIUwd14e/5rxE0qG9VoejlN9xVhHlyxOPnK1JnWrkndxvdRg+RYsxP1NWwimY5TTiaxi1MIhRC4P46UgkrRrVIfHDJ+jfubkmrCJCQ4J5+7Y+7Jr7Ckf3a4O2Uu5UVk6zZ8siKzcD91bhOclWh+BTtBjzI/YknNJuM2rCKllIcBBTxl3C/o
VvsmfrBqvDUcov2JOT7LmF6a9LVTgiKjyU3Jwcq8PwGVqM+ZEpXy2nQ5VUKofb/trjooLoXQf6jX/VroJKE1bpgoICmTz2Eipv/YJ137xrdThK+byycpK9tzDdve6ZLxjcsR5//rzA6jB8hqWzKUXkPuAlIN4Yc/6iLcqpvlq2nuPHzzBvx34qRYYBcCLlDLHBOXbNHvKXmZKOCAwM4L5hXZi3cjvffDWVjkNuITBIJy37Ks1h1iorJ9m7ZZEvTjBytRpVIjGHsq0Ow2dY9ltCROoA/QDtenaDpORUYiMC+eyaemcXcDXGcM0DrzNlUIRd2xZpwrLfkK5NqbJpL2+9dR9d//UUEdGVrA5JOZnmMOuVlpOcvWVRRbaGU8peVt6mfBV4APD5VfU9QXFNrjp7yLV6tqrL1Fu7seq9x0g7rc2uPkhzmJPY02RfXs5uq3DG8hm+xJs27/EGlhRjInIlcMAYU2ans4iMEZFEEUmcPk//EVREcU2us5euZt6yNYxoE8Ff+48xsm2ENuO7QJVKEUwd0521HzzBPt3P0mfYm8POyV+ff+um6LyPKwodZ/aB6Rpk5/ti5Q5qt+hsdRg+w2W3KUXke6C4ZcknAg8DdjUZGWOmA9MBv9ib0hWKu0KMD86gdfVAJOcMudlZkH3G57Yt8hQxUeHMGNeLV+d+wrYTB2l2cX+rQ1J2cEYOOyd/6d6UxSpc6BS0S5S2y4e93n30JqfdVrS398yfnE7PpnHNOlaH4TNcVowZYy4t7nkRaQ00ADaICEACsF5EOhljDrsqHn9WXJPrgaQM1u2H9xJTiA0P4MSZdOKrVCJBm/FdIiIshInDu/HGvFVs+TmH5t2vsDokVQbNYe5RXKEDOLwtkbO2NnJ275mvSM/U5n1ncnsDvzFmI1Ct4HsR+Rv+v717D+6qvPM4/v6ShIAJ4RbuSKkIKFIERGOlUlSqdNFBbNlp7VbsdkdLi9vujlo62+l2dsZuW9fubOsWB7tM3RVFWLRa7IqCCojKxYqCglIhigQFRAJi7r9n/8gvbYgJ5HLO7zmXz2smkwu/5HzOBL58z3Oe8zxM0ZNI4Wlrkusvlj4J+1/iH6f15hfrK2HYBWrEQvb3s6dwx4PP8c6bIxkx5jzfcaQTVMOC01qjc+0Dm8jrZixuNlLW0abn8NGPeHjtJoqsgUfWbupS43SquWdprpfHKPYdIVG0zlhK+VjANYxJunF065fKqFizmA8O7PMdRcSrtqZQfH5obZceLPrvx59nQEE1dXV1lBZUd2kumtYg+6Q9+w+T12eo7xiJ4n0BJOfcSN8Z0sjH1V5Qtw3irrB7AYvmX8537rkLu+42+g0a5juSdIFqWOe1nEJR35Dh2PEqFk5tHHXpzC3BplGxvJoaFl9TxE2//7hLo2Na0ueT3jtyjJLh5/iOkSgaGYu4sEaTTne1F/Rx9TTSyboX5POzeZewdcV/kMlkfMcR8aLl9ms3zPocCy4dQNmYxucmOrIcRVPNWrTyWQYUVDN7bAFjS/OYPbagy6NjcrIVL5Qz6vzP+o6RKN5HxuTUwhpNOt3VXtDH7ejTSE0LLP7rt6/jB79+OJELLfYrKeKGi4fy5JrlTLjyK77jiHjXlV0+mmrWQ+UH+PhEFbeW9eD1Q3WUDTfu3lJF1aadqR6RD9J71QWMO0NzxoKkZizCWnvkO6yGpPnq0s65QI/bmaeRmgrr9+9eQeXBisTe2rzm4tGs+691HHq3jAHDP+07johXnb0l2LxWXvs/h5g/tZSp4/r8+c8XHK6EYecGFTPVdu87SNFg1aqg6TZlhOVyhfzmI2FBH7ejK2E3FdafzOzL9l1v8avZ/QO/tRmlhwnu+PolvPLoPb5jiMRW85pVSC33bj6mCfch2bSrggFjL/IdI3E0MhZRuVzbpvlV5U2PbibjHI9c3zuw43b01kNTYX2+vIrrx+fTv3tt4A8XROlhgsLuBUwanM+hincYMHSE1ywicdOyVq65eQR/vfw4K+78XuKmNkTB2tcP8tnpE33HSByNjEVU0PuqtedYYwcW8vlhtZQWVAd63JaTdJveWrsl0VRYZ43twarXjjH/oh5UHj8R6HZNUXyY4Prp57Jj9VLfMURiJ5e1UqC2RynZxY4lQBoZi6iuTGTtiJZXlbPOhvtfrmLiL98jP+8vvXrQx21LU2F9fOdHXD06nyHFeRgNgW7XFMWtTUYM6ktxzUvU19eRn1/gNYtInOSqVgo89dJblI6Z7DtGIqkZi6gg17ZpPjm/5bB9y6vKsjGDWXCpv9X4mwrr/sPH6UaGu16oBiAv7wQD+/bqcoGN8tYmMycMZssbOzjrvElec4jEidYBy50X33iPIdO+5DtGIqkZS4FTzY+K2lVlUIW1rQY0ylubXDh2GKueWq9mTEQiaf/xDBcM1iLVYVAzlnCnWx4jqVeVbTWgUWs+mztzUF+qDm/zmkEkLKcaoZfoq62rp1L7UYZGzVjCRXF+VNhO1YBGvfkc2lv/JCWZovQEs3RcbV0DhT3VRIdFT1MmmI/NwKMgl+uzBc3QU0qSPFF8grktUVqDMEqWb9jJmVNm+I6RWGrGEiyNj3yntQEVibI4XSA1H8GTv6hryFBU0s93jMRSM5Zgp9sMPInS2ICKRFmcLpDiNIKXazv3V9KzuMR3jMTSBJUEC3N+VFQn40Z5gr5IGkX5CeaW0jjHtr0a8os4o7iX7xiJpWZMOiWqk3GjPkFfJG3icoEU5TUIJfm8NWNmdguwAKgHHnfO3e4ri3TM6ZbLCPI4URx9C5PD+Y4g7aQa1j5xuUCK0wieD86pNoXJSzNmZpcBs4EJzrkaMxvoI4d0Tq6G8qM6+hamisp6zvEdQk5LNSx54jKC50PlR1V8WN/dd4xE8zUyNh/4qXOuBsA5d/A0r5eIyNVQfq5G36KmZ0lf3xGkfVTDEiYuI3g+1NTVU1w6xHeMRPP1NOUY4FIz22Rm68zswrZeaGY3mdlWM9u6+FE9Eedbrp5WjNOj8EHZuusdCgee5TuGtE+7athJ9Wv5EzmOKCJxEdrImJmtAQa38kf/lD1uX+Bi4EJguZmd5Vq5Ke2cWwwsBuD5X+mmtWe5GMpP60TaF3a9x+DzrvAdQ7KCqGEn1a9djzuO7Ak1s4jEU2jNmHOuzaV6zWw+8HC2cG02swxQChwKK48EIxdD+WmdSLu5/BjTrh7jO4ZkqYaJSK74mjP2O+By4FkzGwN0Bw57yiIRk8aJtBWHKsnrP8J3DGk/1TBJjfr6DGirtlD5asaWAEvMbAdQC8xr7RZlS845zPQXIunSOJF26bOvM/rSv/MdQ9qvUzVMJI4e2vAGZ06+0XeMRPMygd85V+uc+xvn3Hjn3GTn3NPt+b4F9zxNJpMJO56ETBvxnsw5x6sVVQwcPtJ3FGmnztYwkTiqacjQq3d/3zESLVZ7U/Y470pWbdrtO4Z0kTbiPdkzr5TT59ypvmOIiIgnsWrGxpR9gftfPMDR4x/7jiKdpI14P+mB5/Ywbto1vmOIiLRKd+DDF6tmrFteHpO+fAt3rtzsO4p0U
hrXDzuVvRUfUFM0hPz8At9RRERa9eb7H9PjjOQuKxQFsWrGAEqHjKC8poTDRz/yHUU6qGlU7IbJjf+ob5hclOrRsUwmw4+XbeLC677lO4qISJt6FvemW16e7xiJFrtmDGDC1X/LXb/b4juGdFCuVu+Pi5XP7aL0ojn0LOrlO4qISKucc1TV1PqOkXi+lrbokn6DhrHteAENDRny8mLZT6ZSGtcPa0smk+F/N+1jxj/c5juK5MiGl9/k0k/FsuRKiu3Zf5iCQWN9x0i82FaGMy+5lt88sZqbZ13gO4q0UxrXD2vLvzz4PONm6/ZkmvxhdzW1Rw5yxSTtPyrxUV1bT/eevX3HSLzYDiuNPr+MjbuP+I4h0mH7Dx1lZ2UhQ88a5zuK5ND0625k0TPlHDtR5TuKSLs9sOFNRpdd5TtG4sW2GQMoPruMzTv3+Y4h0m4NDRlu/e1Gps1b6DuK5JiZMXXeD7l9yTrfUUTa7f2jVfQs1rzWsMW6GRt10RdY+eIe3zFE2u1HSzcybs536V7Yw3cU8aC4d19q+oxib8UHvqOItEvmjP7ahjAHYt2MlfQr5e3aEvYfOuo7ishpLVv3OpX9P8PgkZoMm2bnX/0N7lihtRIl+nbsqSB/4Nm+Y6RCrJsxgImzvsF9a1/zHUPklPYe+IBVuz7m/Ku+5juKeFbUqzc9x89k5XrVLYm2da9VMGTsJN8xUiH2zVj/wcN45f0GqmvqfEcRadWRYye4fdl2pt7wA99RJCI+M20Wyzfv9x1D5JRefftDhp99nu8YqRD7Zgzg3Ku+zi8f3eo7hsgn1NbV8/37nmfK3O+SX9DddxyJCDNj+MXX8OAz231HEWlVdU0dx7uV+I6RGrFaZ6y0V+v/mZVOmsyaLY/jzhigiYYSKT+//2mmXL+QgcNH+I4SS8U9YlWiTqmkZ8FJNWzqjC/y7L2b+WrRQI+pRFq3fe9bTJgxt83/d+X08ru1f7zLtBt7tJjZTc65xb5z5Erazhd0zpJcafw9p+2c03a+kJtzTsRtyoS5yXeAHEvb+YLOWZIrjb/ntJ1z2s4XcnDOasZEREREPFIzJiIiIuKRmrHoSdW9eNJ3vqBzluRK4+85beectvOFHJyzJvCLiIiIeKSRMRERERGP1IyJiIiIeKRmLGLM7Mdmtt/MtmXf/sp3prCY2Uwze8PM/mRmC33nyQUzKzez7dnfbSK3jTCzJWZ20Mx2NPtaPzN7ysx2Z9/39ZlRwpOWGqb6pfoVJDVj0fTvzrmJ2bc/+A4TBjPLA/4T+CIwDviqmY3zmypnLsv+bqf4DhKS3wIzW3xtIbDWOTcaWJv9XJIr0TVM9Uv1K+iDqhkTXy4C/uSc2+OcqwWWAbM9Z5IAOOfWA0dafHk2cF/24/uAa3MaSiRYql8J5at+qRmLpgVm9mp2uDSpt3OGAfuaff5u9mtJ54AnzewlM0vTStaDnHMHALLvtSFjsiW9hql+qX4FSs2YB2a2xsx2tPI2G1gEjAImAgeAu7yGDU9rO7qnYZ2Vqc65yTTe3viOmU3zHUiko1TDVL9Q/QpUvu8AaeScm9Ge15nZvcCqkOP48i5wZrPPhwMVnrLkjHOuIvv+oJk9QuPtjvV+U+XE+2Y2xDl3wMyGAAd9B5LOUw1T/VL9CpZGxiIm+4tuMgfY0dZrY24LMNrMPm1m3YGvAI95zhQqMysys15NHwNXktzfb0uPAfOyH88DHvWYRUKUkhqm+qX6FSiNjEXPz81sIo1D3uXAzX7jhMM5V29mC4DVQB6wxDn3mudYYRsEPGJm0Phv7wHn3BN+IwXPzB4EpgOlZvYu8M/AT4HlZvZN4B1grr+EErLE1zDVL9WvwI+r7ZBERERE/NFtShERERGP1IyJiIiIeKRmTERERMQjNWMiIiIiHqkZExEREfFIzZgExswazGxbs7eRnfgZfczs28Gn+/PPP8fMXjCzGjO7NazjiEi8qH6JT1raQgJjZh8554q7+DNGAqucc+M7+H15zrmGdrxuIPApGjd6/dA592+dySkiyaL6JT5pZExCZWZ5ZnanmW3Jbhx8c/brxWa21sz+aGbbs3vaQePieqOyV6Z3mtl0M1vV7OfdbWY3Zj8uN7MfmdlzwFwzG2VmT2Q3sd1gZue0zOOcO+ic2wLUhX7yIhJrql+SK1qBX4LU08y2ZT/e65ybA3wTqHTOXWhmhcBGM3sS2AfMcc4dM7NS4EUzewxYCIx3zk0EMLPppzlmtXPuc9nXrgW+5ZzbbWZlwK+By4M+SRFJJNUv8UbNmASpqqkINXMlMMHMvpz9vDcwmsaNdn9iZtOADDCMxu02OuohaLxSBS4BVmS36wAo7MTPE5F0Uv0Sb9SMSdgMuMU5t/qkLzYO1Q8ALnDO1ZlZOdCjle+v5+Tb6S1fcyL7vhtwtJViKiLSWapfkhOaMyZhWw3MN7MCADMbY2ZFNF5hHswWsstonJQKcBzo1ez73wbGmVmhmfUGrmjtIM65Y8BeM5ubPY6Z2fnhnJKIpITql+SERsYkbL8BRgJ/tMbx90M0Pgm0FPi9mW0FtgG7AJxzH5jZRjPbAfyfc+42M1sOvArsBl4+xbG+Biwysx8CBcAy4JXmLzCzwcBWoATImNn3gBpVCocAAABNSURBVHHZYigi0pzql+SElrYQERER8Ui3KUVEREQ8UjMmIiIi4pGaMRERERGP1IyJiIiIeKRmTERERMQjNWMiIiIiHqkZExEREfHo/wEBPVAmdsCO2AAAAABJRU5ErkJggg==\n", @@ -679,7 +613,7 @@ " cluster_std=[2, 2], random_state=2)\n", "\n", "# Training a classifier\n", - "svm = SVC()\n", + "svm = SVC(gamma='auto')\n", "svm.fit(X, y)\n", "\n", "# Plotting decision regions\n", @@ -985,7 +919,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.6" + "version": "3.6.5" }, "toc": { "nav_menu": {}, diff --git a/docs/_site/site/user_guide/plotting/plot_decision_regions/index.html b/docs/_site/site/user_guide/plotting/plot_decision_regions/index.html index 43ab92baf..4ebda4905 100644 --- a/docs/_site/site/user_guide/plotting/plot_decision_regions/index.html +++ b/docs/_site/site/user_guide/plotting/plot_decision_regions/index.html @@ -805,6 +805,12 @@ +
+  • Mlxtend.image
   • Mlxtend.plotting
@@ -897,6 +903,8 @@
   Search
-  Edit on GitHub
+  GitHub

    Example 3 - Decision Region Grids

import numpy as np

 # Initializing Classifiers
-clf1 = LogisticRegression(random_state=1)
-clf2 = RandomForestClassifier(random_state=1)
+clf1 = LogisticRegression(random_state=1,
+                          solver='newton-cg',
+                          multi_class='multinomial')
+clf2 = RandomForestClassifier(random_state=1, n_estimators=100)
 clf3 = GaussianNB()
-clf4 = SVC()
+clf4 = SVC(gamma='auto')

 # Loading some example data
 iris = datasets.load_iris()
@@ -1055,16 +1065,7 @@

    Example 3 - Decision Region Grids

    plt.show()
    -
    /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
    -  FutureWarning)
    -/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:459: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.
    -  "this warning.", FutureWarning)
    -/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/ensemble/forest.py:248: FutureWarning: The default value of n_estimators will change from 10 in version 0.20 to 100 in 0.22.
    -  "10 in version 0.20 to 100 in 0.22.", FutureWarning)
    -/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.
    -  "avoid this warning.", FutureWarning)
    -
    -

    png

    +

    png
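For reference, a minimal self-contained sketch of the grid pattern this example documents, with the estimator arguments pinned exactly as in the hunk above (the two-feature iris setup is assumed from the surrounding notebook):

    import itertools
    import matplotlib.gridspec as gridspec
    import matplotlib.pyplot as plt
    from mlxtend.plotting import plot_decision_regions
    from sklearn import datasets
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.linear_model import LogisticRegression
    from sklearn.naive_bayes import GaussianNB
    from sklearn.svm import SVC

    # Arguments are pinned so re-running the notebook emits no FutureWarnings;
    # the old code relied on defaults that were changing in scikit-learn 0.22.
    clf1 = LogisticRegression(random_state=1, solver='newton-cg',
                              multi_class='multinomial')
    clf2 = RandomForestClassifier(random_state=1, n_estimators=100)
    clf3 = GaussianNB()
    clf4 = SVC(gamma='auto')

    # Loading some example data (two features for a 2D plot)
    iris = datasets.load_iris()
    X = iris.data[:, [0, 2]]
    y = iris.target

    # One subplot per classifier in a 2x2 grid
    gs = gridspec.GridSpec(2, 2)
    fig = plt.figure(figsize=(10, 8))
    labels = ['Logistic Regression', 'Random Forest', 'Naive Bayes', 'SVM']
    for clf, lab, grd in zip([clf1, clf2, clf3, clf4], labels,
                             itertools.product([0, 1], repeat=2)):
        clf.fit(X, y)
        plt.subplot(gs[grd[0], grd[1]])
        plot_decision_regions(X=X, y=y, clf=clf, legend=2)
        plt.title(lab)
    plt.show()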

    Example 4 - Highlighting Test Data Points

    from mlxtend.plotting import plot_decision_regions
     from mlxtend.preprocessing import shuffle_arrays_unison
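The feature this example documents is the X_highlight argument, which draws a chosen subset of points (typically the test split) with a distinct marker. A minimal sketch, assuming the iris data and an illustrative 100/50 holdout split:

    import matplotlib.pyplot as plt
    from sklearn import datasets
    from sklearn.neighbors import KNeighborsClassifier
    from mlxtend.plotting import plot_decision_regions
    from mlxtend.preprocessing import shuffle_arrays_unison

    iris = datasets.load_iris()
    X, y = iris.data[:, [0, 2]], iris.target
    X, y = shuffle_arrays_unison(arrays=[X, y], random_seed=3)

    # Holdout split; the sizes are an assumption for illustration
    X_train, y_train = X[:100], y[:100]
    X_test, y_test = X[100:], y[100:]

    knn = KNeighborsClassifier(n_neighbors=5)
    knn.fit(X_train, y_train)

    # Test points are drawn with a distinct marker via X_highlight
    plot_decision_regions(X, y, clf=knn, X_highlight=X_test, legend=2)
    plt.show()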
    @@ -1104,11 +1105,11 @@ 

Example

 from sklearn.svm import SVC

 # Initializing Classifiers
-clf1 = LogisticRegression(random_state=1)
+clf1 = LogisticRegression(random_state=1, solver='lbfgs')
 clf2 = RandomForestClassifier(n_estimators=100, random_state=1)
 clf3 = GaussianNB()
-clf4 = SVC()
+clf4 = SVC(gamma='auto')

    # Loading Plotting Utilities
    @@ -1145,12 +1146,7 @@ 

    XOR

    plt.show()
    -
    /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
    -  FutureWarning)
    -/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.
    -  "avoid this warning.", FutureWarning)
    -
    -

    png

    +

    png
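A self-contained sketch of the XOR case shown above; the data generation follows the usual mlxtend example and is an assumption here, while gamma='auto' is taken from the hunk:

    import numpy as np
    import matplotlib.pyplot as plt
    from sklearn.svm import SVC
    from mlxtend.plotting import plot_decision_regions

    # Two Gaussian features; the label is the XOR of their signs
    rng = np.random.RandomState(123)
    X = rng.randn(200, 2)
    y = np.array(np.logical_xor(X[:, 0] > 0, X[:, 1] > 0), dtype=int)

    svm = SVC(gamma='auto')   # explicit gamma, as pinned in this patch
    svm.fit(X, y)

    plot_decision_regions(X, y, clf=svm, legend=2)
    plt.title('SVM on XOR')
    plt.show()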

    Half-Moons

    from sklearn.datasets import make_moons
     X, y = make_moons(n_samples=100, random_state=123)
    @@ -1172,12 +1168,7 @@ 

    Half-Moons

    plt.show()
    -
    /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
    -  FutureWarning)
    -/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.
    -  "avoid this warning.", FutureWarning)
    -
    -

    png

    +

    png

    Concentric Circles

    from sklearn.datasets import make_circles
     X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2)
    @@ -1199,12 +1190,7 @@ 

    Concentric Circles

    plt.show()
    -
    /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
    -  FutureWarning)
    -/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.
    -  "avoid this warning.", FutureWarning)
    -
    -

    png

    +

    png
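The Half-Moons and Concentric Circles examples above share one pattern: generate the data, fit a classifier with explicit arguments, and hand both to plot_decision_regions. A compact sketch covering both (the side-by-side layout is an assumption):

    import matplotlib.pyplot as plt
    from sklearn.datasets import make_moons, make_circles
    from sklearn.svm import SVC
    from mlxtend.plotting import plot_decision_regions

    problems = [make_moons(n_samples=100, random_state=123),
                make_circles(n_samples=1000, random_state=123,
                             noise=0.1, factor=0.2)]

    # One panel per dataset, reusing the same fit-then-plot pattern
    fig, axes = plt.subplots(1, 2, figsize=(10, 4))
    for (X, y), ax in zip(problems, axes):
        clf = SVC(gamma='auto').fit(X, y)
        plot_decision_regions(X, y, clf=clf, legend=2, ax=ax)
    plt.show()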

    Example 6 - Working with existing axes objects using subplots

    import matplotlib.pyplot as plt
     from mlxtend.plotting import plot_decision_regions
    @@ -1221,7 +1207,9 @@ 

Example 6 - Working with existing axes objects using subplots

 y = iris.target

 # Initializing and fitting classifiers
-clf1 = LogisticRegression(random_state=1)
+clf1 = LogisticRegression(random_state=1,
+                          solver='lbfgs',
+                          multi_class='multinomial')
 clf2 = GaussianNB()
 clf1.fit(X, y)
 clf2.fit(X, y)
@@ -1234,12 +1222,7 @@

Example 6 - Working with existing axes objects using subplots

 plt.show()

    -
    /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
    -  FutureWarning)
    -/Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:459: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.
    -  "this warning.", FutureWarning)
    -
    -

    png

    +

    png
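A sketch of the complete subplot workflow this example belongs to: fit two classifiers and draw each into a pre-existing axes object via the ax argument (the two-feature iris setup is assumed):

    import matplotlib.pyplot as plt
    from sklearn import datasets
    from sklearn.linear_model import LogisticRegression
    from sklearn.naive_bayes import GaussianNB
    from mlxtend.plotting import plot_decision_regions

    iris = datasets.load_iris()
    X = iris.data[:, [0, 2]]
    y = iris.target

    # Initializing and fitting classifiers, arguments pinned as in the hunk above
    clf1 = LogisticRegression(random_state=1, solver='lbfgs',
                              multi_class='multinomial')
    clf2 = GaussianNB()
    clf1.fit(X, y)
    clf2.fit(X, y)

    # Each call renders into the axes object it is given
    fig, axes = plt.subplots(1, 2, figsize=(10, 4))
    for clf, ax, title in zip([clf1, clf2], axes,
                              ['Logistic Regression', 'Gaussian Naive Bayes']):
        plot_decision_regions(X, y, clf=clf, ax=ax, legend=2)
        ax.set_title(title)
    plt.show()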

    Example 7 - Decision regions with more than two training features

    from mlxtend.plotting import plot_decision_regions
     import matplotlib.pyplot as plt
    @@ -1252,7 +1235,7 @@ 

Example 7 - Decision regions with more than two training features

     cluster_std=[2, 2], random_state=2)

 # Training a classifier
-svm = SVC()
+svm = SVC(gamma='auto')
 svm.fit(X, y)

 # Plotting decision regions
@@ -1274,10 +1257,7 @@

Example 7 - Decision regions with more than two training features

 plt.show()

    -
    /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.
    -  "avoid this warning.", FutureWarning)
    -
    -

    png

    +

    png
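With more than two training features, plot_decision_regions draws a 2D slice: the remaining features are fixed via filler_feature_values, and filler_feature_ranges selects which training points lie close enough to the slice to be shown. A sketch with assumed blob centers and slice values:

    import matplotlib.pyplot as plt
    from sklearn.datasets import make_blobs
    from sklearn.svm import SVC
    from mlxtend.plotting import plot_decision_regions

    # Assumed 3-feature setup, matching the cluster_std shown in the hunk
    X, y = make_blobs(n_samples=600, n_features=3,
                      centers=[[2, 2, -2], [-2, -2, 2]],
                      cluster_std=[2, 2], random_state=2)

    svm = SVC(gamma='auto')
    svm.fit(X, y)

    # Slice through feature 2 at 1.5; points within +/- 0.75 of it are plotted
    fig, ax = plt.subplots()
    plot_decision_regions(X, y, clf=svm,
                          filler_feature_values={2: 1.5},
                          filler_feature_ranges={2: 0.75},
                          legend=2, ax=ax)
    plt.show()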

    Example 8 - Grid of decision region slices

    from mlxtend.plotting import plot_decision_regions
     import matplotlib.pyplot as plt
    @@ -1289,7 +1269,7 @@ 

Example 8 - Grid of decision region slices

     cluster_std=[2, 2], random_state=2)

 # Training a classifier
-svm = SVC()
+svm = SVC(gamma='auto')
 svm.fit(X, y)

 # Plotting decision regions
@@ -1310,10 +1290,7 @@

Example 8 - Grid of decision region slices

 plt.show()

    -
    /Users/sebastian/miniconda3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.
    -  "avoid this warning.", FutureWarning)
    -
    -

    png

    +

    png
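Example 8 generalizes the same idea into a grid: loop over several filler values for the third feature and draw each slice into its own subplot axis. A minimal sketch under the same assumptions as the previous one (synthetic 3-feature blobs, illustrative slice values):

import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.svm import SVC
from mlxtend.plotting import plot_decision_regions

X, y = make_blobs(n_samples=600, n_features=3, centers=2,
                  cluster_std=[2, 2], random_state=2)

svm = SVC(gamma='auto')
svm.fit(X, y)

# One 2D slice of the decision volume per subplot, stepping feature 2
# through a few fixed values.
fig, axarr = plt.subplots(2, 2, figsize=(8, 8))
for ax, v in zip(axarr.flat, [-4.0, -1.0, 1.0, 4.0]):
    plot_decision_regions(X, y, clf=svm,
                          feature_index=[0, 1],
                          filler_feature_values={2: v},
                          filler_feature_ranges={2: 2.0},
                          legend=2, ax=ax)
    ax.set_title('feature 2 = {:.1f}'.format(v))
plt.show()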

    Example 9 - Customizing the plotting style

    from mlxtend.plotting import plot_decision_regions
     from mlxtend.preprocessing import shuffle_arrays_unison
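Example 9's customization hooks, in a hedged sketch: plot_decision_regions forwards keyword dictionaries to the underlying matplotlib calls (scatter_kwargs for the training points, contourf_kwargs for the shaded regions). The specific values below are illustrative assumptions rather than the docs' exact styling.

import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.svm import SVC
from mlxtend.plotting import plot_decision_regions

iris = datasets.load_iris()
X, y = iris.data[:, [0, 2]], iris.target

svm = SVC(gamma='auto')
svm.fit(X, y)

# Style the scatter points and the region shading via pass-through kwargs.
scatter_kwargs = {'s': 120, 'edgecolor': None, 'alpha': 0.7}
contourf_kwargs = {'alpha': 0.2}
plot_decision_regions(X, y, clf=svm, legend=2,
                      scatter_kwargs=scatter_kwargs,
                      contourf_kwargs=contourf_kwargs)
plt.show()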
    @@ -1498,7 +1475,7 @@ 

    API

diff --git a/docs/_site/site/user_guide/plotting/plot_decision_regions_files/plot_decision_regions_11_0.png b/docs/_site/site/user_guide/plotting/plot_decision_regions_files/plot_decision_regions_11_0.png
index 414c4a8ecffd7f60d39cc78ef68c1b8389a83cad..f18259672a849e0baf97ab3c676c5f6d1b3818a8 100644
GIT binary patch
literal 45748
[binary PNG data omitted: re-rendered plot_decision_regions_11_0.png]
zS|ynPy~?*tSN5%W>F5jMAF{>pC5qBZtkTOUjc<}3f0|b(;)wM_R;L*Feu~%Wb`p&G zv?rIaOC(O_E)7G`3nE=wcBeQ((OUb@-vCecQ#k!ODNLFcPwi|48%Pd?*b(4|6=|W1 zm(BPUbc^{{?OvgNzdgJ|Xp8t}bVk3?%dtSkd2Z@~AP$rSed#u1Vfm&xKaOp~vJy6< z2|csb=#b_D;=`!vD8f9xNuCV-d}7_>PBdaeKIE%=GXnkchH>4fSyTH%^!>X>39Dhr zMaa6kI$6~*;l>Q_e_DsmXM5=3jF>Sa?s(j5H=8c)*mb+}r@_s;GK{_+>9gWJK*)V! zl4E$X2O#qQ|d1)ygr?+YuoIDW$sNC=ZtUNU@#us zpsiiM&Cf7=T(H>`mt?tof`n7&u9ZeULsVWsyt+Rfd%`I@DLXBe3**PP2+O>oZ=2iB zv|hW~nK{#n1ikJPQ0wcgS-h#}8-Z7Wu$LK%#`V700@RilC>2Q_fb08vjWH=#@$jxA z#%!~TQ$*hp@5lRynG|lp<&I(5861c44rA4h(0xgf-|YiX-@?teqK<4niPXHDQtQ-3s2yKx?fskJdLH3NJEv;s&Ab z4{EMs04~kfZWp85vxkYxTyjC-gAtoBrUGg2lYgW2x2*-z#}ha|mSAq&KTe%ri4b2#3n^)Cgdk^!=u9CUpY^J)k(-omeVJvPNqgw(zmZv`aIu-U-E_%gW~YOljDn{uxWn0-9zMIc8{ zo_Gb2C6TI;mn<40dz@!Va;8;Zw@}GdwAI^vtRf@&~$o+?Yzjka~(_kU(Dp67QIz=ZTcl3WOmYdn?Co>kkDg;8p9gjMqNPHA|)iG*hd?FK(53a<51gk{x)|< zDVJjZQnz4KBUsK-59+ipY9&8jnDbXMaQ2}2lZsSg0;gSDR7_fX`SJ899x8fGpC0}i z`=j$PT)W90ufHwR0(kvi#r5qHdPdxu+)-FLVDlDCc(@-0?xxvXSY!7Dk=YPVLPf_O z6v`J48SV_Zi+y+`26ah!?M65{DZ_otJL&6oY%{zg%Z`zk7#fHf8!w=z%|W|_*@lQ4 zdw%y-ytsI2#i!?ccXctDr%v&+qzW{*L#XRUbX<`$*&-6o!M)bECO2GsY;lD+@x8P3efdxK(*T3h9*Kq05T^SH3+gvn=lz zt557)Y;mE;Zt);lj=oEDRmU|^H8rmq8bC5&E_~;&?gV`>{zz~gwzG};k|p`x z4iO0KsB4?+T4<512nb}=pI})at~QD0kk5uyPE7U*c%%Jbw-8c_b0B=-m0?Btt~AT6 z7^Shs(dDVj)8&&^q}Z#-wvwBeKcTND8=Vf&pt6%xVv~0kJ8mQo>!dxdksAr958e=+ zPpQitfa>aDw=)Euh& z)lz|U0&I+k!-tHn1KqXp;@9JKPQWZ%XxQt^0q}6!J0?AxNhc$9DLvo4-{vP+&d_yi ze+&Lo11pOQL8zt815io-Y?R|W|5XDjS|VmZMA->%;POcCiq9{>11h&`%%qTr##*+Z z8Z10Z(}A%oKY#Vz=|avYbO;1=6Zdq_H@Tt%h>w>eP8SOf zae-xorse_)1+g|l`IU!Fvj6$}q!6D#Z-{2cU=qe=~l_ zq8vzK#P>|Ftz&6k7YX7mP&iwNc6Vv11kI!OTB8LT&JPJE78u3DBl+Hi40oo? zX7R!JEsN}{5&amEB`b~st!290pW~HCeSLv4p13$t8ENdl?(duFppufkEOu*-k?9+l z>#>^Hna&pRvx=E=8!KLQU+lTiC+COf&>i>2z-b8S?})}|gEw<|G?b^1ljT4G)zT*g z(4mjNjZRXv=J28W{FqE5sj}PO)O0Z<6^o46`su6eLzDJX=Vgv4^*(PrNfM?^+HLB6%KGY+raDQbqep9+u(^k!05 z%Yd>}9Orv~D-6GX@d6K3T1;43p#UdvxQDx`)nqwg12@<9Z&>=sDZa$DrRb>Zut~f& zxx=VQ7rZ7jjLUaZ%^ToX)%}^{kytw8acet6&S>&@73`3qA-rho7L^=3%D-~7ako+M zktrpE;KJlW#b|#ilkH3Wr4C)5h&|Fss^-CU5x<003yS(HwrC}euzurCT;N_=ZA-98 z&2LGWy?2Y;C)*tuhspfG6?41OTnY4&c~Mw6v^ig21L_FTWCap(E$Q#O)nkdoZ$N4b zb{IvD5uwMy#*oH6GX4@rT*4de>s@UF)~Ch#%p)PQn6K6a8iDxEWb>)Ae6>BeeqLMC z^BlCZCJ*%OK*%jSj9EHJ?^9k-Sa9BG60Hdoj{rX^*&$6*P*?Hm_S8xz8$ z7O``3DStq0UK3x+AP%L9+|M$URK_k>r0_m|^?p1jk_<;fQ6(uJn1Xam`mjGoIns1Mjj;4X~rD_vXU6NShG! 
z>6x~t3Y{OT4H%IMuHIQyuvd#>%tjoER%0Hj;Qo;V;Vc_c^a*;r_nh@^7N%+w`+^=C zl)F+r555>Md~~^+vn1#Qr+Y=HXkk|F*}v9sr8(^nzDVZ_!PVf)M{~=_xO*QZO(nl- z?TX*)BdC6KPy+{o6xD5&zsXm@(r%o=)F;kI9UkVsTw?1$!0#=((%;|*XlMNWzeGY283$}SJkNnNWDXhFU2H#mH83a`1taB zoT?`ku8NA>u&O6@54`5nQ{FlysCqo?b`o2HX`*OsQsh7jZvS*@%chbK<#d-DGkhS8 z(S+%v-7Q)y7Gh{$vDwh3)d9ud-dHVJjqyGWxGip)?+|sdW}A^-g6cB@9QjDBm#fm_WoNNf(}u;p zHQ9``SFOKW_&Lx5N}sTM(x#ez{|L_rK{0F-6DlMkGU%Q&cxpAl;qlp}>+k%Y#iv*z zlQN#uviqp>L)4dJTY}Id*UHR#v@t(UtFKp$0??~9n>E4MdAYhr(Y+a;PcO8%AU%QT zLLXt;f5VNMSew<5f6-qECBC^_4qnZ+jnVUkiZ8z92(?mB_xoq9vknDpOF#Alf=7qi z!gFccD6_@y(4dLrjx(n0N88O`uN-b;?##U>zn2gN23s%ZEjam^UA||{(537#s#|n7 zIdaT+*mRO`i7?m?Dyx3I2 zJl9}VX~q1pU>MDm9Zkx7ZJ~E~uiVMF!zC$)?rjraX*cK0ny!LCV}y`n&U z;FH!h>POyG_nU!^%I;8Lgas~xn`3LG6tCKO_JDB={ILbM|C1y`RI(* zZJs$amz=geBne;$U}p%YlK&>4;-*q-wPsE7R3|acM`W9D%3ey9|OsSueZ}(fOTO9;vhA6B5E#SNu>}WAAg#`Sib)s)J>pAoI z-_~~g)~OS$K9~v2{`vi}*^$-2tZnCU@OC`s*S{k&CNXc7k#Ze$Gn|CEjP(*D&Ww%1 zdqlMW&P;bZ|NBa+>aEQKOx3>fh37QdeR{z{Jp3v!(X9Fe2Rvy4wLh;z*>g*^SNcH&PLJ@=aak1gz0i8WeySNhG0l2nuq0*T+_xf#G0<- zM)3kG*xuSr>bbopx;-B$*mH8zvJG>swfqD{LdOYnW}PDA!5%e>+-D$`UGg0{v4yjR zi1Jtz#-svrFaAn+rC+?sGd^0INL33WEFGXS03BEf{L{ASs>AC7mQBf*00`0~bu$kv zT2ifdHE5Z2CIt?T^Ia-s_U~wfQ1W96WJz2_V)$ZOFCHt`tWDTrdG>rOH#pzt0S2%e z0h+`{eDtZe{0nu0BGc{w;=Wb>#WQ0bA)nJ5|GzXRn0RR*?_qzrD`X`ahThlkH-j=mS2wS z;WBK_PmHPI0D3sz=u5oQ);q}^9z)aTr})Pf7xm9`gR9ZtDke(>!RLb(RjU2vB`S() zeSmq-s-)G3X(Q05gsTyw4J7}~V>*mNqbedA81=z2*a$FxL~Zw%6|*O=ih-KBd>IIG5cn#udaY7Tq^CmvHkzDg)F zW#r#@^|@iqS-e>9iPSzkB9889*G^RR9$&fE&_?v>lBxEb`36c= zlw@MB))`HD%?ohCUkA2WOKI>QrhRv^)!2-C{8vJHRcT;eFf6mZ*w(9CiKShr8TdQC zGa)|2b-_Us0>@4*no^Ip{xQ$<5;U?|b*-B}m_eB~TM#?G74$(_C%{6dBAC@b!0D{i zBmQg<1JAaPC`oU1pnMV#V}hv+gBU%R#YR)!Bc%nv?y85rpI>+uAIt=|g8sHf%x#%4 zB71qPi83Y73^s->;zbmW=)?w&KPD8#QbjT~`}#l^w4dz$h?zTsu^F{Pr)!RG2EnQ8 z+m)3yb#Gb@CGZ&_VYSi@)rsM({sb!AmD#dA-UB;oTj-4lZj2Xu&$D7OV*=Xq+#Me8 zMVAgEa4Ed|{59vdZ3>-UavJ2!GNR3yUy~ow(CNrCrXU$#9Dk-J^Cms&PjxJg>~Znc z1Y)F_uf$xne{c-A?!DI z7L{aO4+_*|mo7oC8Ce}r)5^QQC|gs48FG(t{M3-N0LW`_3NFD`EL(ZgxXBHtn?y>& z3v}N+7ZL(oZQ=>3~Ll@EnlO=9~8MXP~B0x7%Q+3hBV4#!H0FHt<41aScH1pYvIiDhAx zvEe^Y@XnBx7~ttq^x5d%;oI=hM6s#Qn4VE3qcDk4k}kK!;vC>to(BqyVc`** z0Dcik;J)uyZ+w>z^_r5JXmaf#nb^Q|;fM92`Q}8ftknmtUk7*@Wj+H2#~@FhRPPX- zV!bRcV=~$8FhJb??#2LQZXm|#l7FF~`uECit3=|va=@_B`9>k|S2+DPj#^TWP(Egk ziAc#fHR?yPWD_5sAAP3c+E0Bhz@cN$HV}X>!@8)(+zt7T>BRg`5&Bjjj7b{hLSa`s}N2g8Z)4XYW z*SZGIyu!l3lgaR;E4bR+{FuCJHPKJ~qT5&@hT5m-nO2`Gf{A_gF z-u=K+f%MUr(2NO)*g3yTz3X3)0g8J#nvdecGM+xg+LDd-4N7Fku{GxYrUssdu-hLg zZG@*oQ4dPh%oAFV#XtM@UuTrV11fsFgfz;UUI-ex`xCskPinDOlzHT1mPzwCH4;!K z#BT+V6sq$+$i%f>cY;R{VQ4s+jbVe8Cg!^h0OJC%*(Wrs9;)KMH{W|m$iMGAQhXfQ zjS)kwdw#L=+7a6^7-lquwx)~x{`}o@He{sJf2up+N%W2x>y+y}^0@ey)OYBRZa)3J z8bc%05tDt>2j)f`_kn+;Rj3}^CqRAP=fqXuby$N0KMAUscAaNYcbsGMq zHeqBRGEN$rr-8VUWZ60$$@|tUh3D(pRTo?#^`mLc{g3@ zsJ1@ZK{z)kwi%ms@(Bs4*r*1MjFv4@itcFl>5yqT(UVlp)&QcixUJfnuuP$bM2nlz zjnQ&=A^gm@?b4ao7PVGrCx3N=F+gui&4RJ@{=6(=YKbKgvpLcS?5Ni=a_ZlBFi37y z7R;Y`Vez@ik(}lw4IO!Ijc#rYnbKKgv=2|!zTjUPxY4qb)8t8Sino8;7QxNVsFtNI z-=lh+(2O>tTS`9TXYM>@nmpxC_KYXKZi4_TKN27winzl#xaLiftWs)69zIH1 zJ^Y%b#}NsV0N6<)?lD>i0JC!Tg;le5XaS)>HS7y;$*Cod`wG*{H8VKUBm6ueB{f25 z<|vX=P1Zukr_M+_oIiI#lY2PbviY~{>Ay8@GEN{vd@J{AUspcF2 z|I;bZUY03YwkcU=D(!=FtcpUF=<+s4>n}33^OPSlE;)ZF?L+dWFl7n_WD2){EYlpT zcR(=F^J*_Id^YIzYUzkKZ|Y?`)(2H9f%#p}{pY|Z!t9z6wnlt6fXMc;bwZPw$%BGc z$;!KDw{c0OvZlvDzkcTS&-mq(80|Fv&HLTF8YX3#mNzBin#`=VY-@~<|LuRKsM^Bt zzC439jrHG^&+Td=0Um$-O#O zmC@067IrCr@#_ac^~_6Or}nO%gw58NtQ*>Peu-Hnf;?q#*{}&eg3XUlc~f#m#qVIN 
zz5n%`#Mz)b!XyVm&JSAZ>)92=9{M~OOKonWMX6%kJwMJ3d6~|I0aCCN=Mz)r#Xnlw`W&9=YvN%X{aFEDLG`QFH0?lw@=pvF5Dwr7t;O3K1?aIbaZG z#f2!B9gQ--x+2n!Jwg1h2iSjXCUz`oVr#q0%dYyAJ!O`^7_6Q-2xIJLynbPqfPqK; z+Lo)HtnO1Dg>R@Ye^^;J@WKJ3=H`3!gY+ijg{Fqut5-ntDBxSfh_?L*09GBS)q|3> z_`jTX*#C3diJu)#&q52Jn*JlW+x~wMi8udOM56wAZF)YN`~NFZ1Uw<{QwST*pFwtC P06)^=3Swm<`ak~%bMllf diff --git a/docs/_site/site/user_guide/plotting/plot_decision_regions_files/plot_decision_regions_19_0.png b/docs/_site/site/user_guide/plotting/plot_decision_regions_files/plot_decision_regions_19_0.png index 041df917b14156379934d414f23ce206903adf4e..a7f20118d25ef898225171df98391373327c2747 100644 GIT binary patch literal 69356 zcmb5VWmFtp(*=raaQEQu5}agk2=4Cg?v~&l7~I`0xDFNw?(Po3Auu?1^1Sz3_xGLE zv*_+Q^y#Woy=#|rl(M2U8VWHA6ciMitc-*z6cmgG6cqF>5(4B*AEm%9K|X^3mGe-C2;G{r?-l=HzO{PK_%84+ZrRN><{Fx_9@YxqG6OD(q_=h9 z2K4_9qEmxWDsfT%JD~dixvR?$h+GT9G+Me{JqJXNQLmTKC19FnVD4U6gT)J!B@ieF z4*-$WGZzts2Swj1=|VAg^D%cTWm*_ZUex7$JTNa`3ov)vnU-?dO7!tYpOGm$P$-Kr z`}YG$aRI{krmaQMK-Wm%P0_#0S&y`BD4*O*)UJIAw+1uoufH*aU5s6*K_HZmMUfC0 z&?GB?vry|FglR(wNl!ju0*fxOtF8I!TSKh#99L6VEMzjlQ+j0WtR$e1@ z<%O@9fEN{{1}!8NhEQ#m(Bo~lGh50*HcE*o%w06fLCAi2J|J=_6w~Lz ztrdy#359Y9x)nBgow|yBl<+ioO;vS-YS~LFEd!B- zV%~;fJ^~_P0g;FS@xM?hMX>?6$dsN)lyr~r9qR#4otUPm%50(23p1OaDF+cKB`Pp? zLorQ%KHu?E4$=c6VJIaUFipEL3u~U2ner}`s6!vbCa!kM(^nII=kOopif500X zo~2(1@Pt?~4iBoS>8ab|O> zH|GL`*86w?xg~??UvQ z-#O~}k|!TD1@@##e@FO(^%#*^VN1S_-o;+BPE*pfok<1yj5LEsh+?0n%9g0Q8nP6! zPQ2OxE!$fWoTz${s7j9U69WwWPG7Q4Yha(Mia+n#qk^Oo#a!+KtGyris-GVRyvb9Q zs{*=;v2kwx5fw#wXa)Y1k@FdR^{Kq+0CGvo=@3cy@#U9cQmM(=B&ojb-ufc2Jqi>9 zNlJhv(3_{8#y^3sAmD81X(veIL1{{ixuhDX>WIL%husAzD9@6F7`c9*IQbRE6g1-j z6XSsMZ{IK7J4?u(aIl@1$yWqyK(v)j=LU{ zL!Qw}j*+aFRKJzU&Mesvzgin<$LDfY_+kg%D~~zEr1uNs0ivp0C(sp#gF;IVzKzr~ z6O(G?d`!XPuj~v`d48qOG&KLd&+glBH`MX5XzHHZR~YDI+>M)b^l>OfB+5;E01e_w zguSs5Qo!UOZ7pbrSm0!%&a01?_Z&-+vlh!t59|V(QaiZ(T(_{WNfE3ToO7Y)Kd@dG z6npA9Uz+c>lL7uTwO{TicjO-1^5aDriJ|e-c`@R5o<%jzagIw0(<{7K!+CERH@+4OHy33O0lv@68KX71*ZER%f(G0=kwzw{0Eu;f-t9 zb;yBi?Um%(rR4+=e#02T93oRjuf z7>8kH7?g>NuA$@7RF9*ld%y0CTv<48y?`I10pS zSh+Cf6=B0R?Cob1O5vztw~*m5OdCLCd~c}WYHhO!spC*$~M%_=}aB&k|(9{@?|r!TfLLkSNmB?3%q*mP>0 zv>-H2Kzz-w1CT5LsoQyBmjvA(|DL`)XH}RylD3x-5Gl0rI^RjQDUF0ht1k01Jeq{zLzN{inbig zY}3dz(smOU?W`b9;OuJtKwkTN`wI{$30<2LE3a;}_XG`!V9wwoOzqgZO}NMo-D%T& zB<)S{eq`Im>J@X@=n;nUhaIolX@sg>sTW-B#9w9$<$zWT(cAFBK3{i8gAsD39S*|Y zX5Ae2sJim=dbWIluTf^83I=jfq(_Z;Y-et~l?fgFu~D2Hgd1pVBlUtxYZSfrpHR_}!C0$Thj5&+j`AlJ^d`ie|IIq0n)qPzg7@gM2P#I-H0g#Q@YCWZ9l zPp)bpjMio!yNCWF4Hlq@0H9B!e^=lOo3O1LQ1l;lNtV)Za3InTJ`D``a3xfW$D1;9 zuG$?1y$RE?GaB;rVd*afp&%m_h&`T_{$o2~R50ahLH}boxLBXzHU4k+qo9C>U1^11 zJc=Z9!K|r8tHAd_@gW4gwg#IGj*XpxUcuCKBE;@omEJ zjFE+iY~53yF9#VADbr3{1AnkLK*i4sc^5l++2k_Y9IETSFA1F*1bjJyh z+@1u@!Lf~8;GeTehLv8jE2FD15a-h2sHfP?&5)>aA|Vzc3(CsX|K=Z6NZeM9;Yd9*{|Y~9f+A^#>8VT`8VTDo-0aqZo=#u#B3*vY z@9v_YD+Bnz2}Sbna@#G3TFmzGk$cHKaA99{k3g zs_y#qTBqR`@T?bvR;Cl5%kZ(&pcL#==9y8w@sqil4=?&~@FH8dlx5|cy%$~D*Ys~v za~+;PD@RNM-%#8x#SYlR3JTvzu&SZT(Qo@vB!skE3pS6@&@UYa z1kxM-5ybu_C$QKMfaiu{k22xlrY?#zI$nQ)MVrqS1VJfS!#1 zY*aYe9^lt-G|h=lI#Ob$iIB(o9HPVi&bqpx%0L`S(L!dr`tCXR*uV23*IBZvbK$6< zx3F6uJ=)n3?ByWHEV~o!B4pJXja&`D(BK z03wh)W(Z=$WlggrQV(l9(=M=f(5~jQ^l$$W7lxe8`(o13GOepN0l^W7YG+x#GeKM~ z>6ZQa0Ld14@l>K;TbB~Z+DjS&U%EU@<*E$2AH^J}b|wylWJK?%YJHCTG_yi$Ay^al z4=SSZ2XnK94Gr6n6(YFX?RRlg{`cFzm?#?mvyBg}8oq;h0_*qN3!kXvhfr%T*?$)% z`;!}7s2KZ;PD=FN2t{tRXai3Y@5~eNwr8s^z^P8sZ6`qAcX&(vbBXdp{!5AVS9Rrs zfo>&5+S6U36Ter;WlCf@-A;(3UELn6N0eqRqWdHAf~HKy?Y8RK4oPeMw`o@`TSsk= zQ9g|tg{Zu#^3XAr-c^(9Zh6$>HPHVCO+qd<~tJ|>Q+m; 
z-Mkd6eHz2{&2ej&lh9%ag8%#|KeNnZheHx0M}&OltGY}o?!HjQP=zQV=t1Ot!`h@bfZc5q4P0SdNmTT*a0OW!;w(#t{S4Gp zHFtM*iU}j~^V%jE;i9h2CwdrXt#=9D)fgDh&hHhdOI#-RY_k!<{XVTPBmz@TDXYVZ!q+ub~`B%dB~GsNg^Za;No5=n9XW-?#RvlH4IWuprid&oU==-<;VT zXf-m}^MJ?;20b7z7>6GtS`?q|bRZq@ zRlk~fIS_t{&@0@i!T(jM&?%5=WwN*^3|bP(UN)!#`s0Y(3Dt3Ort0v&Ce&#{a6u?R zqTrK9qY0Rkj(tVyk4>8M-S;4=D!Udj4N0i~&CH%$5qkkvsJef8^P-n3_!PZromzSy zdvh2s2fbYjJrbK)CFS?S^17;^j_O9?0~yF%#0#Udd?fZh6V@+b|Mupa!ta(c*LJ4O z6)KfCTT&nhcP3xwP~At$CvdJqL}Ixe+3f8eg&;O#u=cz3iTf{+u21i$GVZM!1=!-J z(k?pi?Ed#SQ~o(qCte~`y+499p*2t?AFECv3xSkoDWKMDyI~nm*2-ab>H-mz@KXeD znCP=uLik#%F50;>~9`X0z7C9Y~o$}ELQjb+O z|CSYQf~#EjWzlWs0uaaZi)MJJuqUdZCIziC{%UsD!M*!0K0;h4&+3K>5B8{$0?q-u zBn^^KIX1u?mOI2V7{^;rPJnmaGmBY)bEKi6+{v_@9j#f8otu%*Sk>23)xLYJnjLg8 zVe#~{X8OXtHv1t{4v@e#YC^B_hB9RY89HSwgOUz(L7?FdkfDS$FEptR|D5CKKwWgzJ_p;f~T}Gq4<_z>_dWPaS4I8%mWWM0uv+vH=H_ z-=af7#I`B9Oj*m$m^q}1&*R^pDt zm3%a47+D*Af7utgUGCXd(cXoQ65e7^$}=&4xMVyexIKrA{ z0L}5*y#-m!ga2uPX2u{fi(mXcx5c$ndQyk$ukUKp*3B~)NmNMv9@=)d1QL@b1x;#s zP!P^BlP+O;mj|^8>nX!XAF)+(D9a!l|D!qrTTZV)Zo1VWYhz^E5xr-PS?A$=HUF9T zk8MwBlN>v4^ca)3-HK&P8vR_*cq0v6i3H>s_^5)8Dk3YSijLZx^8{zS^MI--IIgWe z`ps#{#|WdTP^o5CO@gZI57oA!aU$mKC$O0{#F;nD^DS&DJ1l%U8k_U{5*1Zzqw6TB zp;Wp_>TV>`I$kMSiJ+V7Us73TMfdCHdvZk9;Ggc#GG)nJY65$rFKBDMV$LHu$?BgM zUX;;Mf5-^106hU7+B7fl^kk`=+qxZFpC+EKSaY^VG%9r!`;29FA2HI(7yN1>>#Zt_ zfnu}n$w-+rKguJpZT0R(GOn6lM-FlJ2~uY1zI>O0b-?yu(IHzepWkt=KbSibxEn%@ zk}daXTFmAXemlttGUnCVt=Fm70`3BnH0(Y#nmpskJgItMiAWo zHH4$hT)C8cQzCG{AT}rU$P80n`CaAXx-UbIM#6UoWDji_&MYOkSYb?xSL zv)kp-1wJ5FB)NqH6eUgLMJrc0E!=mIE@Hmv{`3`@GW^lI75GMORd)OB!cOhr`lQjv z^^8N%^IYS6`3>rb86XX>F%cVH>8r2Tc}JEqsoxQfRc3pVOEes5m{M(B+Nfa}6NjN` zZ+S;af3$f+b1fFD0i6O+!N|-4h2PCjvnq^58h*n7U!ry9MW-rB!~`)B=RT>6)S{X- zL{(8__a|OZnV$R7eClu_J$xYz_hKq}Nb|9OycD+&I{HBe%Eb6AH}Z|b%XLZ*eS+*F80vS|?&+L0o8pTxbr*u-S9)KWhp5@;Uda4C>?< z8H6DTDc}We zzt`t8aqX2mwg`K)_jhq`Uh>GyBuM0)zqmt@bSt2I_1Fp~tk>FhhG`i^xYklE$T>3# zLLpS=E!%Pqc!5N0eTCmDEhT8=Ila@{-O&WgQZlj}ST-$+>VGD+5LxmNW4U*R+z{E0 znIScQv^G=LbhKPe%UafO%zrPpl97?CyTxNzhecF0Aj2R5%4jPln>TUs%d$&G+ogoPa*@$S7EW# zVNLbK)g%4f4lnKL^HBYYZY(?11-xEa#*A)P=G#vMF86VJ?QLqUk>D={$~@Vio^}v7 z_b1?XJkU_btX~T!}kqAn3H&*%6s|jO7^3X0Y{S zAMAV5v&F#5VETklM(}<@Yr404x)4~bLaiD`Li&C5+O$X!=n89mxOHw`y4F$mNQPu2V* zt1*wt%&_^R;v@4D#LD57w@`j<=N)l8&eiGsH02#YFe06iJ4ch{c+^qM3QIb#YQQC> zw%Cs2AE#rwudZo@`qSvI%i6BVU`vOMZWsSn>GTm2#ZQyWRvD}*zo8s;EQUkONq|=a z8{K1{z`kVfX)^i|yPPg%LOoiMWF?!9k=D~qfWGHBv6vgd?D!KLRKyDI)r+xo&>azS zOX-PI7!@uIB=@=U@$Dv?B!0jag^h=*qq&J>)@Vn6Y4smumX)S37sS7OGl0gKjGPtE zC#|^f`dX@VRPEbLfRr^Mr6Nm2#fCRFW1yXIF6uL`9+^04yA;a(q{e`IN1f;wjBuTH zwC+r*spgEukjI2=y@$@Xj^*kZ?z&6XVE7 z+5r;(l;sz98J??mPkQla>vckW%>a4EKUcNwCoj%d8=fRN-6=Xo-67-m>r?u9Jo|C_Zrc_T)&%S*f zUJ9|H-4}hn9)9A+YZ>045(`V0r+|eya@-kd2pF2#>bBp2Px25+n-_S_$<9#Hl`R|Z z&&4Ecvypsx2ru|}J*zTaK`g`e4YP-{oxfL_qM;KeDm*tK@qn+Ft&?qeHoT-AL>FwQ z))uG8h~14`RDamF+)iDW&A1PL)K;0#Hcja~xH?xQP(MDW03dPLR~3ln>83~V%G8py z)O+{Lup5MuUjEe1L*^exNjE-iCyY^GdGKVdXiy~YRGwnqdcjtoNP9|E`2987;v9Gl z%wip+v&fH$I=Jea@HqVB4X3PXJb2QBckC{R*_(Q@H#0=b^CY8uz`~$QqOq~(E_V|% z%3tH~wK?FW|C#{qvfdydc(H}b_J6gFzvJpl$U`QWf7gio&0{a;JLvP z*PgM}e;RivE3m8l_=WvAx3oX+_1*l9DE3A<_R;^K8J zDdT|bOE^FruUGA-9X}9MDE{$?CD7xWSIg)ghuRZSwQj2bOOVfME$StUMrOEn&X^JP zv-RVto!hbdP}stxlN>yI)=yCW9skz@bzZf~Ljs7!#=b5^08?MMwBh`A6PyGLRHKB99+MNnBLSx;?Zd3YRU4k=2hU8+R{Kxp%8ZtPZ`hrjf1U<~9p( zC#&MXdBkJ+h;(i)SzM*)3KDf#%h^f)dHFKwZqd`99-SlXER+=+C+>vKI8gEZR1mKP zk>%(0ubE8{HK%P1Ze`%t+uApEqQccGeRbbYhLHpThy>@k1V=Yi% zttQYxV)Y*n&!~{q-x}*pSF3hcS}I&ZGJLE^0=L?5WL1?h%@afns7#pXl_m`DIG-Dp zzbyFMK%|m;b{|!!@VJuj0>~C|SVBp(oySi$dSO*zONalZ-S;+O}8CoaJ>^%}xH?eAjDS 
zW{+l*d1l9er6U8S&Sif2pp<1{JjgH=OCu=2*NVP2=$-Hy`8r=jXYn&4shDTi=GK>m zZTiH4Y72PMtULa8;+n1IQKN|%nG|(MMHt;2g)$!MJyErXAgZNEwpi-(&V8R3@T8OF z!8r0ok<7zPE|3XZ&?iXSpRL{#UHn0fr%rH ziS=1%0V?y5=~jaBZG^@T6Iqt^2V)Bw*1PMItkRCIfAQg*uf34+2Hj1{9lfE>y}rP+ zMc|Q6)}?BxYo=4oe^uc?kC7eyXtq4yezrPx>2XkZC|Rue9v3Acq!1+&8%(u;xTR8h z`Ez>@-Xivl^>-wJJ4f#u3~B~4@hlqgVbCkLn`4!qJF)#v*XmQ-_FA*~pnSNeo3R&v zRe)A=&CS~Fj5y1S4S^e9ZvGb9p3P~p{4^;tkE=T}PZz+yQrBf*2KyCWVauehmW%6W z>t|T~8HM4qExD6)mU501UdEshyAs!-U^^S3V?)p>7$n45`Kb7#v;b1$8#eiKZ-gdc z3Wr-E`Vh}0;sXyM2HPD`>@I6W4Y-9jA8ZZVSTj zTNO{n&&yIyhe@@*9i=v}DDjJT1{o8ULtXc*25iKA8W{TFQbpIDmqyunG45LUi%4jU zL>a;F$J1calj?3`Lo-YUr-I_%-=zeO6)juF3|uM#7}3f$qv*GHX>gP0`DT6SB=6m9 zxj)>P2?HxrFHU?BiZi^;-bRUUIbjcUd`LBwRSm7K2;)Jv{d-VGpZa7Yh(5?}LJfdP z%jUjp60ob~FEsN9eK~bWN2y#_@4lqCKw`jtt#AN7F30+;FKP`x=InWUS_tGM@#?47 z9Gu8w6g;29_D2v|}T>=XQIF2A(u=9m|U+R&VfiXCpOdnvk0M z>llT`myFu!5c2}HM#!xGoUCaM{}%N%HEqTPrW3A^y#2kKdhyxE`!ZDve_H?aRK+HY zesxEX?Vf(4HCDQSprJvleiuP_wup$fP}itNOW<^AcKQbz-+I_U~lPbV4TK%;aPOl<5?Ux}R!{4?e zG>(tD+f-Gt{)YPQHRZg$ADcX9TW|%h^|Rewke~n6u_>woDK0)9<15BHs;63B@x&rm zK#eu|!y6qeJob#U_JSz9>oUo$rtjx?P`)P ztgO)Tk9uIz_ft*hhh91j$AkQ@;ndJO$qHtC$PR%?wK!I$ zoaBdtg=xpmm+la$Jp`*uzvWqkE#T}!aLGOh{3Fp3iz$c~y@snCMctcimD#)!T=i|r z$cHAMv@XcJLkmGV&O+P4GjK9wDX^gsiR9^hiuRLeGxr2Q*Q`SX-jR-Z&7_@ znOlRsPM)KkWf0G!Np(BkbQGsePkosM4Irm^MwHAo0MWc0FR{v6|h z0TGVOGsw*N9MYp+2%~!ymFOg%BRY5Y1Z|pcWK-<@zV4JSH+TSRXk@^i$8w*ISJ>?W zG%vx+hy8j8w^gnm4C3{uha^xXADTwHXuyLtf_0tHv%IMRqg$9knAayjM5#(ja}WfGsG*Ra<=S z&$r6Tkz{ed-ZB!FP4kl>T}rh(^7mSCdT+Z;6wL&$1=xx@Qt^!0domq_O7jniF6M}Q zjZsR{#LIp(d25q$2Q5ClU}jKxEWPL(va}TY)5ar<{L)s)w6%dAS+SSIu_@FvUFpo* z1taPEF=bP~Sap);lqW8Bwr8~uqO&FtZvCApTNpZ`y`TLDuKq}Isgq&d?P{j74{CJy z(2@x}E!~BPPRRb#_Sj{f@QcoFkb z*gAWsc%i(^z2H|ldR=irMLWr6w=hh{h@8I!+aB7*FJaRVOZ}p2psc~`!-YVi<-Xv2 z(L`NL|I2U6?ii0tvr_4o;lv9L?_Im<^!ufJcuYCh%FExgY9nox zO|^l}C}xUtAJ01k!slzU(GS z&(K=TudoMvtp+<)%*{2}v7hOja}XwRWQ;<#7uOGe+)fp!d6vJ1ZIU8j=+?A^Gk+7` zfKlh+=|NvpGnYa-UcwiP;MEHHa)n+c$SpN2x*&QB+SCJ_T;$2bEg7@${U~g7(?KNS zIR2pi_zGdweLK$EJXz)I4bczWbv1J)x;2xh90?$(*oy`;nwOFV+aI$9FAH!s9$z%J zRg|d${N{Vaa@ltrmH!C!KMfQ%`*%Ci>v>X`2EmNKzJ_|qI@z>m$TiZM7EK!8VE9Wr zbEnl(ZfWiE)0G6a*J}*wc+KSwj<^e=0rF6``j1F z3(-1IDAY;4AKBqGLGYYoo#z_sCo3_5JW)edtZp_6NX#{<5~ulCg**#+aAa7x(Y?OU z{!72Mm8XvnnnU8{eNy(ru1Ob`uCk)7%>YPvcQ9e%K;2SEpZ)?nWpxvInl_Y#5RG%6GdI2c68mtI$9g!+YzPtGu97|_gNn<;YAvE zi8iaU*|+>tce;)Ba=&AhAuUoc9xPOuwV%d>k#4o)I{;8RfS%U7YRBHssL;>n%=uJB zRe6FjVd;P4k4;S*bXbuLKIn>PYDtj|DPA+Vu2eQ{3{cCnfrg{&fXJ$Mc~mXvU`q_*gi9{r)`W%Kx<3 zEsuh$q3P=4#VSV5*YLgv+LjrYz_EhLpSY5a8au;v#%rw8%T`3xjQN$425%Ul0ijte zxS5S!h^~`r<{K}uMx8Lfv%p`Q!NIHl+MLkhP$~g5>u7RY>8;V)HanmJ9HLhrMrvO> zNOSX8er;01BEjv|0g-+#+AZiU!|B2f6)txb-2x@(aF&-afvnKrXYQsB5$_>|_tjZM zN-5*fs^e@Hl2Dr$?PU$~PT{2PA`vQo!1RPQ5AkR;>dM_K;Ei*&S=UNk43=U_dl(+p z_oiQyKFwp1tE+W88(q-xkHgV9>!VBn3%#WR;MffI9=dnO`QyyT9K~Yc$Bt%xf=;Oz zzV^cmx~Wmo&W%HI1x5JJgEZ5Qy zo!LQX(H8x)C>>=s#Op>qjcU9H!B48m15$Yo76+TIrRZ^1-#S(O7Stm-U$MQ-hC~fJ z*hz{;;v3dSa;*L^w#p9uUEua`Aflb*DV~U>6D2xvUtIR8`V~8UY@oD`-iUWZ75aIp z)Gco5fIUYp#ND{H6Q-~7?dKucXBn&CkVdZha49t~BML??9$RZp3RIhUgZZ5=q!s16 zqgmf*oc@f)_l=YNpOeHWOf*9%Bl)$urSIy4I(0`kJ431nUM}<6n^2WoqgBX|f)RU< z-tb)79|oQrQia7ur>Q{g151)Td!{}{T7V$oUrqGye#bs9NpRsFt&JB|Q&8F^k&T$M^lO2wX)W1<|ta zNiao5M#XbcqTD1klxUF6Kv()NQI0JhQMUfd(#53K2=OcF`DHHPxwGtJfaa;NXaL_- zP+MrzILl=ARVf%NPNWqV;-L4w#jIs@lORU?sndW|n;|kiHHQHSM_9{F9<3W|zPvU= zv|&i`qi-?inb6agRJql!N^m@RJZL2=;$#Vbb`So0C;z|Jn+1G!WIx*0yM=~*FC3>A zb%|{B3lY5ps0-2I&ze-n_O;-YMU)!?ki;ZnP@k?QVSai2=e&$K$o&`0shVp@mEJB+2l*3ii+&0p^~uPn3T#R?J7u{BOt zh$BAH+GZ%9jY?}nN+HY%L;_Y)+j?#VUw5Yd)no*91v8cKu9`D^ASIN!< 
zG)I~!8`fKw$Fs6%qM}Bc1#Ivh9%Ck&v1Lrze9O!~!%RDS{OQj9lMCArjf%O|m*)%m@h zGd5GJ&uq!lxSzHZ;d%+uPsC+bz4`fQr+V3T%~K=i?#5$z&;q|l5EpeYnN)8idC4-_ zX8Ib;=P5N&lZ*00OFevGXHb?CKP%@i|A+%B(@7E)14}D+c~*Qv3+@k(cSI<|h?Q7@ z{fU|q&y?vl2#46=z{5@khHN;BljK#uEPR7hBN8D=MEm8y;8&Mfr!ds{E`Nb(+a%*u zz*0&i@fmK9v?06xhp-gI$y1s(Hxn%G*5zp%-oQt-+;g_THMh{1=ev9-RmPZmq4q1F zg!`+LbkV}>$+j^0tH0QLdx;Qw#P`t`aewYpg^QraO}|*Y1JWzdWayHak9{ z*N&KQYBN;3PvAcbR#U$4LYj0WCByXR)Q*nW`P&`RE>q3^wM!9zPy2XtnA&QCP&&WX z_ElEQjuswNVF8@G28To%(cNqkc#{0)vr$|Y&!<#X^<^4}7WbER0;v$JBx$jCZU`Ru zze0j0P{;is^OU?>iKSty)I`S$&i>zO|t z@pe-1+#wU8ul?d)Z48Y%>DH0M0QKjHio`t%5xDr@HQ6dcoy?iA-x2RkoQI=>(NTeW z>YW4I`(hLYDQ-v$Mi~fX8R0CxSj$HLa_DQW)mINDmghqH4r#G3swBQPN9#7y_1fo6 zQEpe0?2;cCkV(}c)Pc~Hmr(Vuu;-6lp~EyBtzSmLc>`w`s;oWUFSP3=c=ki2q5BZWb#so5D21z)P}!(B*dEMDZC|%ldQ}ZH;|7K zfm6#^o-_3A3Rica4YNc$M@woTXcjNRGN<`Nw29hMiY{TsW;#?aF*L!R&ruiKDn;Wq zmaU5Z@$?FOV#?+uyli7hPuObQDgu zoY*xvBm)+rCDCAa!HrAG?lpn(1&-)cMFVtHb;|-u(naOBn+?~qoE_w^+WPyg@$eT~hR2E8_zl-~$?Y&MJ1vVLAicsarAi*-!GrDRcLxGJ=mq zO{47XjYc2-Q9GchfJ647Qe#I9z4;5y5{>(#SU*r6t2@* z&@Tj*dXG9sj|Glxd$Qk&axSw}&KoH*16|E#Jy$juIf@g-PA@Ms{4EA)+BMtfoBDrg zy2n4^AI6zp;8HgOaxqZMeW2yVl7$G5jSCrDzAAG4OV{ zdMJe?JeI2jxvwst_v#rVp z$k0|%{SY$B{!U372Z?Ac-(K-+ZH%NWd`Lg#P%XRTDgp%_ZrgqF{rv}>TK+_YQEG=jGreJXaK4>cR(>9_`V?;A(^qhpCPf5qD##yJGHWAh z(Zq7zhFFtg<_6-}?Q`{pj_QO%fk_abcysj;_FtYjT#d(`9|bY`ES6t#42s-up-#gvV@iOah&4~Ooy^-OW9A97Om>1bFf9 zcT#?ABRi9hOr$b?BZno;4P8^a-0vDB=vdTAKJG5$g3|B^A~Gi*ncG8m%4mN=h+Ws( zg#JcDHcw7U;X0{rsa;z^YrNC_+T&b1=XzlHrc;aUSS77JYom&z3sh1CNPEWma}}03 z@n6DFSn+JiZG2SxylglDZixtzcf&oaRDgJ=UY>aJI(|V6rPV4~E$vut5^7x9 z<;q7O9>L8Da{`y;Y!F9rv1F$ZYf|b(J;ZfPQ!A(yfus~NbseOb=q7_37~0}is#=z& z-%x->y1I@-HjRnpX-!Ie*T?yMffl6Uckbs#wk_HxKWcY|)I46C65`e-r`^U*d{fUk z{j91W-Ob`Gnu}x}^%vy>r**0^G^~c=a)n5vMwlG~u3|ZGfW1qO&vr7yCQK-Q{OskL zq=F<(OKm^aYss}!d`7w^;()Qn0#wRlhmy40THM!0A1u41g{OT8^(j=;4U@^^chQ|~U1jkpptth<4S-+$K zLo};?saZ&-=HN;w>@q&yF$=1nm733KO`l{Zg2o{IwgPBbfT+&mn_BYI4FtbeHvcI zCJUGRm58OFI=13`b9foUf(pmVy4OC*6k;%ij$H>ztPXf%i1a2%)lgj z9O|S86_nS)ZD%;~YxtOHn<3(PqF*3SL%b;EFTZZc>gjsVKd-me=QbAN&Mm1W{=7FW z*iqi{59@%Tn?jMB3XF!0*d$hU##Q{hI9fgjd5^@`6K1bAdVI7hytMt%?l3M27cYY- zz(fn5jE&}gehiwys1*qfeeHGH2QDsHo9Tyr&C%pe=?omSY&83Xr_ysPJudC#&0H^UNBeUd zQ?UTtLm%AUl&D}7cnn?#dn+7i9Aw(nR$Oa4EJ52Fyis)5e*S)1=d zS|5aq76W;Bd;>mw5c*W^MvbKo-_fU9qhAfYaoBc%TKrnooo$apNhm8}Dw!9dtT{CA zd44S{qcralprqZlixVABcDS1B@>Nh;UNebYFLwXXZLLAukMW-E$b^-hGVq{Ee@kt< zevN2`KKv22r++c1-0stT4s+W{G@;>JJJ9SG`sP|<0_HZ`T-X3|RD*64XT!1D+Rv+L zPSGtZFNf4@TnxgvTxf43~N*^aGPOP{DS``d<7?0}_k zsAwZ$ z-|lk^-alUQ8;*L7EG3CvA-ID7AKuP$3#XtJ1QDrr8 zBD{oYx0|IEcNdx0;Rep>LgmC1F>e|VBcu&(1ng;W)F|M8`(%2&ij>M;DZ*2XR;B79 z;knoMp4LKgl40Od-*kQU*C^dcGZtF2b;+504xQQ%vmBmqpAV{=5^%I7pJhbHz>+}e#ZyI?DLf)K7pNEan^&Z!TCQQL{_!BROf280u~O5eblMo z`mU5V!t}oLOz zqkn7jy+$?|@nUX*@>_dbo4*a4U}@i>$cqzE@k+_dmM&509OY{r=EDmgn~vVt{rL2i zn-fUI%K44~h%H2_aOOA5T*z29Ja;^W_P2ONLR7LaUj9kTdW3aX{t2_w_jCdX-eV5o zf60_8t_QU)(0-1B%}ouEtFd0d^6g=ul?xuejDo$tF+Q!QH>XnVm**7(mNk&qm}sBA z5l`fs;tFl!xZ3-{E8FdgwT9gAC0;%k zF+W#YazP>tpP_@m_9yuU_LPKRuoIZY0dw4Zdt{5*(~PF|clEw+zbQ9_MTkgYtEHK0 zuemsCV)y=&Y#{=)6?b%Ku!WyH16f2!%&R5aJ4%hP?NO}ul>HqjF@lROHa_vMCS0Q) zgyxt_wIqXzwK}Q7%#=zv$yLeJluBrL&K6cmRp<4T=nwD1?t|(PHDrnVGuv8tLJno} z4L{m4+A?8)d#>X9!af`0^;;6+;jeg$aq$1T3Rlu*{P=k`NSrj1p*78a1^$eaEAGiT zm>j0zS?ZYWxtxNVgbX(JJB8;lpZav;zAv;lByL&bxx^?6rCuq-Q>}EZQ*O>pzb zOpKi5r&sxj)>s`LreKdH@nm;r`infx%YNsJ#c5l2ZuxJdNWatgs|j4|{ZO@D^~qYI zN*Qe_DM{?%P(%VZ54|2uJr6zRus6}_%J2!S4WY(}ubC9Ya9-ynMaz4Q$3HxC&~kmE zivv!G`Lz2rlYU%+j;mb&M>wUnM~Dcm zE1wPt$J@l-cH8x>w75q5Nx?{pWx0O)1n=#*f^mzT$CRU&13NDa3d%}2)B=NIRz%gA 
zOZ=6~t^*ZvM4f1$)vV+*>zJgvGd4|j@@Jf(U}k@JYgY|-KNf%i|Foo-FkCFRJF2XCM3MvFIk z(J1C*0=TdB)U2R(;9}Gc3w{R~Upbprmi+-5F^2l<$140puCwtOhq#sNzHV5PqOUTD zOLSqRubUuQC;!jiE)F+eyTikmHytqxc*P7u`CAD}ITXC%ba?qxALP=i*bGMn{nVEX1uIKc3uRhNL(2Xjy4lkQt+2~azlqpkscp;6Ux9sz`I%jv_Vsaw zY}*BRCSOkXY%w~HMv*_+ZhgQj?U6L7a(yeaL`d$BZH0 zx>b7~pDb!ne`LgFDt&5cnCYvuVDgb!xX9#4I?OX(eg2EL1nRJ#CNTzDutZ#f&VYX; z$2JjRTx|A!Oj+Yi^!@sOTiuqESyb8;2MB`yK{q40flfhkT4v zi(Q8)!s??&Z{gm_c<*xPoL*9GM0rt|n3O6zI<7*i1im$OifVrC^Xri1B{B$maChgo znf9{{LiE7$lnz=w@2c~9<;);+mZ9^n7bR0EUogoMqY875dFAQ2BL9e&2?UEtNO_VF z%lmgFuOIQS*Pp^8y&J+0tW!5EW$3qeIZ0Wv)xbPjJ!!15dq~n{R!O_4I(r zJeC|WFMEaj-lUPg#)b5f>@U(KHrC%*+(XcXgbWZmP-T*whQ$=hc(4LhYLxs7*+REg zmFCW89$z$;4qkmQd5h-!TUH}KJ=*wv_52u!Lz-C>=-jOHxdauMh+a;P_)U8@Gtc|x zH{r`akEvqJz`j@9Z&&h(Ec8Skw{ts7{;JI-=;FcUl>4nfN5Re>Xs$pODXu9u{q&MTqD4mOAql^joN11Y3JeOD(a3cq60P*6K z*|J<_{xx*!RGMTYGuIenSBsGuSvQc^j=`)08*hA40B^D!vkHe4gRDJwxlI2#z>|-PXd>T!u?H+KyFDevcp3Z)JxZ$5IxOmAlGj z2vzVfwv!-tss?y^u}0fs?@qspL=|JHT^p>_IFigJYm_ad^@rM_$thD;Nk@F_9AVn> z++9NZal>=@JRGOoOQ=t>no{LZvGv>K;ZC>PpbFo%U4BCO$R+!69=onAWG zcLTE{Dpirof5^8lb@wAleCOV}n+K*UTGPtGh`>%Ff+?F{XIy)S=SyfO`IbPi5(t5j zH#1xSOVA~;6uT7U*GojPdmslfsTLyk2GhuSY){qMd7F!hMCxRDrM*iTduJUP-)+T} zju_C4t<`EpAIv%Q#mNm`np}5kT;4io^Po+7x8gSP4Hm!VVoQC~GO}TNJHc;;2zs85|R7JV4TG+Dy_0}iJ?8;hw&agP$*ElBSA&~pEuW3D^mtHVWZ7r{&#v>tw zY06>tm(IB9O<(kKrijNqWm2WxZ1?ubLk6nm6HeMn=-{Q5oC$Xd2Wdo&!g0q`brP3Z z7X#+$S{P82A+pR0{D5`zOwinVpFyhb*6>W#)6?fEqTEEE<$c45U*?rT2C*xUyK#@V zpm<9=DLUX4h)kz#8JUYCG#KW7wxQ5GPyKvFb5d%@Sy*4i6TT5?}#S`+B_0%2|+Hg|S&0#wMa=(ud z=#tr=iuaP1>MJ74yoW|dX0+Q7Ul}3tnTw>fhsw2w3K0ZAPZu}DnlFj`XXkA-Tw!UG zKCUh2R+UTa{XHtNRlzMv2E`nHuv+{-wK}Ac%iVc&0uKe8kw+C%DIrvf86D`SK8l?u zRCmtam~mrao&KA1^2km@z-FTGN$=8OAGqsER;dqXpHKq@iT4*5JM#O}lB*kE<(|X& zH8UhkC!^Vuni}6#NPU>5uA4pny}Md5O)jfE6ONyh$mMxLda>E$ysO7%$enV#_ST!)S3@lERhI5axfe-XZpUA%62nPytvf!oz1Q=!X~w9)V^Q2>8Em1lSby) z>F~vhP{ZNx=Nbose&Gyg8E61#3L=y;YYc8U;>R;dqA)qpNV7=$RdCGtR}(kKX<-=r zp>!u@*q|h_9qx$0EOU>(-bM+;5pPcM%B~`_o!s4ebIiap!d3#}!jE9E;j-MaozerVC7g5gphZ{jAl_r4dz zMv?6T-pS|#1=pFdFgX*5{^XyAm&S@TGdvypu)5Hh`0W)rgzGw9(%HqO;pz*XN#YKt zN9l}9JVas1Y_!#&SRguwaYQA~#Ksn^q)>k3$-q)!cul0EU0dxxC8K0+2=>m}Yf58V zvE?l-WXs-qqhvHZ?n)?!P9;I2z+l$1J0TK`$dteDb*f7~>x^N(=WP=lC^2QfIo~x% zys0CQ)H|6eKAb$?=P9hpO!0?4w)GKFbcSNE&<=GYAdtJ`S>hN}pSs#qXE3hJe zQ#iaHPj{5irWN^3e`_r4wzja+Q3r=Z8u_2qw-K1B&A%y{e@~g&id3XK<4h!C@SLi; zwGt$1F+bsHmgjn+_)sq0XD;O3V9gYnXYdKdpi_PuX<)q0@yiv+c%=FS^tus#nDdS`k6E<#d4|xmPO_5-V;1}99E%X_TE{0wW+rN zdVK=6&2)>iC;3eUL$@J!)kX%r@VMKJVLn~T7b7>`?uXRB3$r1#6(NcsD8DH58Dg^*srp$Xkl_3^!539ML}j+ z20#B`F4lNpb`IB?8Z)xd|5Dw7lQ-l3g+aGhMki*2%u&3;L06l5$--t-OWtGXpTlNw z-Xgukm|m&~F<~3Iz9KhdZwh7j?mjQue8^&Zm*S7q{3ae)$Cq~|ohCi`?$9Ywtl*VC z2b;w6dACpTJCxf1HoP@-hjAeDcv}CZQ<`+0wJKhQSR2_a$qBSPXog<6e6n6iJU-N{ zHXk?Ibzto@WREtfGE{LlcMy<6%=qxLE#s0?jm$n^d`M#OOj{WHG z3SW$dh`Lk1F_)$lsi`w+=z@0o@S_3G!d-7B&}wTh)c%|~+N?9gpQ;)JkF46x)@{jL zAhE>B=@JHlIy!Eo(f0Hw0b8g@JhC^78gH0*kb#I3@FQGd-etb)+J_`h&y#8eDLt&y za`UWBucyF$Z{Xfn9}X<5E^j83sFX717QY%0tH|(e&X> z%kW+Pn(mK%kDul*q5`^+_^~E9Q{|K8%^`((e;y)>P05$H{zCs?GS!KrKNLpwW@BV_ zOOIZwlpo;9wLqii`S+tuji=wLcFZsqH5OWJlzo4nMv=+V@GQal_Lm`>@Q=uH7^cV` z@ej#yVx+%6adVn}M9LT4H>F-AwU)hx1`Zp*?Eo8c3@`9iDav1hAFRj7Y{NOm9utz3 z<(^YDY@S0otm{s7e-7${@6XLu;^a+Di6*L{F*Xk=+TIQJ3Tx^Qspb5b`VYKmnn`7w zFZLdUzlgF{vo0Rx|FDTP_Z>E3b!Dbxu54jB>uEN(QSc3b(=YI;$w>850$V`k|~Zd|NU&oL5h)$DOY)Qd_~C3@meC&j@NOQ zt#s$@7_@1T4%mp94A_kVM!86FGG^-p#AI|a_|>M6E0&<258sQ_v(Rs%IVGW~JozX3`3_A?{uYb}d`svk^D68-3tT|T z=5K~9ofBVzLm!!Ix;)t}jmhG@$-g;go+sH1nhpgi6USdpW$e8*KK5p)s^=W0Cg)94 z!jc4Vt$GwfSik5^xdpmIS7QRI-91FEW3%93Hq)goAwh%!+;9-oBRHBCD|8!FG`1Ex 
z-De^F<4Cwj>{$j`g&hmKB;M1o;fS5V(($X9^ zP(R>o9)+;@1zeVQ2Y9HBuTesB-F665z0ps9T{1@Lc^WDBc91Af5mw~b8Dm`fvfiXP z5QJq23}?JEwPs4*#%vp)lTuH%f@(M16D8UYt>@ujvDerf3rj_Y;tEDS5}i{tsiu0u zxmfc7fhls-yXP`JS>ss1s!RTQq$w&d&lD2?f74gN%1Cnt^+n1{W@b4V1z?}AX6-`}C{Wq6-F!mf^XmVJ7y#;B%~yGzR5L0Ocnz(oZyUYLT{ z($rXjJ@+b33iV~%t8S{+2RRinXw5?)58OJf>I(tTW^8>n$wkARS7iNqt?m3Z0g&XM9%CYx7Cc2H zH+&g;z7+isrKzUoxHUb#f8Q_4ar7Maxg=6%!e zI0=p)OIf;jXjf6njCtsgnK?Kk95Ifawyt--cEvJW?k#9+o9kw11JXEDYwE0Y|Cix7 z+onP!vtafI_gBpf(-WVOH)1W&%EgxAt5wVbrIt)oHkNCA@H1Ky+7nL32<>kIhMd&q zxxvwm%4)a!72+*iYlA^l@ya_n5{xkubBrHDk2~sy2|Aem-DA&xO(rpsIiM;&C46w}&ma)6>2K zbr3cU_F#JS$>j#`jAbzAH2|mww9b;1M9{c7e{oz8sS)~|Jo0$z3StJYH11fGo^7+i z=@``&;1o|ks!iRGNI89|$M+|_W^8A=sc+;7UAy1j4zXdv1$1`u-f%7zYP8#_h zm0OYyH4~ev^`IIr?9Is$cVr*&AII~>n>FLL`|+>$*&>Sx;R$OaQQr8DF z#8e{8Y2-?k&x0MQLg!KQ%J0y#Y1+M>U%lF8_n7h=iznlMTs$VmXcJ)Wv)xR<@&jad zgWTTz;119hj`j!zd7|5c2A@;98PQydDib+Q{(8#GhSRguszbLNmzR-=#u2<0js) zKEOlXY`pBjS|Ry_pakF|=y+>|%n0{R_a;bfqy%_1FAG=9VJ*@^En%>b(NjIXk7k2t8Wc=*~fq-M;+B9{uiZ zHu!*TQndB44}Lna9ch�V6j?^`&iMkLP5oZuxukgBt;&_Ds3oVxCm(F8e9Le`t{{ zTQ|o&Hb*Rly8D)~jPCGHc2;f|W?CW{icw+GMhh)8>!(MpD>sE6ZCiD!aR)qu%Rbr8 zQKmwB9_Q^)q9P|@{Qv41W6*MzuGiY*D+F%=thaTKZ3zy4%T*lVGRxz8)vYYW~mhEEBc9vK-@ON?JLw<@Yy z+1Y}ROOx0)$eC6aQ%P2bhhB@8s@GBoBMj^#;t(vqjLx~Dcvo5v8vuln=GIC#AfG>8 z*XN!qF`jV@_2eD))v-^1F(MbzrIdY~wfY`q&c_TE?PVH~{F*L_;E?b#`X7<~8|)DT zU{zQQt5@xUPh+d4GF574eNC?jH3os)XOYEI>d|3GFjKf+F{+LZz_eR2>W9`873&p- zap&j?a-k*CDzPr%88%6HYcA|onxf=tcun;sE^0%n847W&2;9+41|C3eBu`g;X{cgX zIBYisUkSm)UHc^juskAhv!%{_CH5JXbl|5%h91d9GmaT-PA!e!E^DF)fjf7`f#Mc8mi7hmJPcFRQ z?>t-laxcre*!n&8en@?Hqx|z%KmTB1RyKW5Y`jjo9gzLXcmC_4YSTw?yto|3o=~}A z5DUnBrL6y_O@_GVuKvZW58N%UJCfl2FM%mcA%YZd_=r}9;CBuR9fpymZe*P+7`^~k z-SX6vQS>c0oH$(`Y%G)gj7;QU(4NZEe0h5`H?)zHh>J{d(N=!#<>x_Do-P_!{*ZV! z*Xw;d4r|>xGsPh|@Jck08#PL}8}{sQ*1I6$4=dJr5eR;gsRl8zuJ?7Kz7c7-4Rc$% zGt;CV_H)A|bmM1K{Fu z=I2a3L=O2cMwApy*X6LgH^TSSTL0xHNzf@JY;@zoiZ{y2Ixsj;$x}T`57R$3&&eVo zd@r?_G@sY&d0(h@2rK#7O!DFzkhJMuyY<&~R4-CJ|En9a> zdAN(TkF~FrXSJCfp9<9QC9%6M+Q-Jm9TEg$k1Fe-=Q!@L2Xa;d8T>YcGjPc0NAl~) zF&`9}Pn3|Yxb_PXA4C(~S4e<|pc{Mj_ND%`^6L``g*R5Vmu8ZS8|ZLABowWzr=J*L z;{jO!Y2wG6?Ee9vJcFTyhQONHKv;Jf5gg{#{C84Q+)E@o+&tt%Y&VUB zg1`tQUUlo>^I^Q&c%qS>OR|t3Jj1aV6GDdlBaw7&3~)oGB_>J$E3cF1uCuPgfiHi{ z+GWVjc1GmUJ*+k!p9FXvk>e>52G7dGBl|Nc(3)Cxk{rH9T*bDSRPh0E6RvgEF z@vA`P<|?UE`gKB`C1ud|Ew`q_VmI8QKA_nj-Ic7aS-B;yID`zD_{(D{Zutq7()ZSH zYqR{`$p=ES+0@-qPrU>sbZ#W0*fBYD!QWk)Stk;W6;4F~=ol*E9V6_kk0)yBZj1dEs6zT{Fpmy`ZAC;L54#iMNg!seJ`ZPFu+aczGXh1KV^&Kw?8 zx4@=wv!b-@P(~=s{jI6Oe9Xv$Zu{>pUH7uDS@V(os%<+iip0*!x$S-uvHEruDV#?5 zp4!dxqCREgTPH#muL$(iPw_7@1x)%q7@h5wOVy(Ek|edSMWiz<&Ce?|z)*MJ<*ea} zHjYdZbp?Uhx6^wF8W~6iYlj1jnp+_StNf+#-EuPmHqCdhYvjscMN^JfGMSf~-B zPsRO+;Zv<(*p;?>IG7_R>*sm%@W2Z+L=LG2Hd16g6od3jgo=H>3S+Wnh*nq~26WFX z)b1jHhne%_y&k@Gu_3wzG4 zmCSS}n0{!NV&=#@aBh5Kjo_WlF8G#T$ASLF^VCqY)igm4#o*VFk*78Z{?qB5brjQa zkt)Z2n`(s)la%uJ-8@C0`5iGz*MwEF%!ZArg`-WOUMqI z7GOqguLWn$at9%YO>d4I_4gFl75(R)d1Hlwv-I}fc0m-g-!$xpwYz!_b zeSM~3m}#F&?a<-Q9!I>Lny71TEOlntWk9JRdQem}- z0PM>li8aQT4&3b4Gi2wtG^ulfosS9dMsWQ5VYK+d+V-90UWm;@1jphr-7S}M2udIN zJHn)c*t>R%8~506+>RhqcLhVB12p#D9t|D%OAA;*hV3&nna{T-&T#g9FMn8R3nx4A z+XdUb(5>`-bDqBc{M^r1rj3Zr2LyKAG6;m~kVB(iU*17shaHD-U-_T~xjpax?2);j z?+&fZo5cO~f(24H+c($?VAde63%kwx#5Q|3Irnu61MbbIpvESc0&s10Ehd>ZR>2X; z^I8|xEIhuDEB#LA8q+!CRd?%4i2X zgVvK3&9P4J9l7%5#x!bX^$p=lw7hlf&)hqWv5YFJnm`?-G>*mL$^{B)Dk~uRqe>|y z>Qt8MPx5|Mt9y9_HXEnW6WP5lUh~xDKKIpFad&HwuvY17c#YX601xH4j{U%iAx(}c zv^$$2+i>+!(Cu(^!>2h!;drdg%EafS1ITwApLK-}rn{Asle({v9SkK6jzJ$b?Y@Wa z7i#@hsCEUK-F46^)7){Al3!1Ye0s|86ZIG8Fuxdv*K|$IF+??=VVZ8$6PH8}?i>iv 
z9^35Kk45(9`1Phw<5+kv{(jiNO86n#i2Z)M!o;Bcf9chVYyApV?PD%1@B+B8pdyBL ziJW=G-efG12E&{QikqRjWRsRBa*|c8%m7rESl2@pRq>fS%e=lf)t3sOxaW28bZ}=2 zpH-GlQ}633Iz>e0=oWOs($P+VjN#kTtX$h~IRCvvYM|livknq7x-zqoS@pu* z+&6~>ZQ_3IdSM`?{WdjS#a~U)Slo(~ujMHgukdA8 z5#^-a;h_q=W2ZUq{&Oi!7aZ7;MpSi~v7&9jI{GXNh!&Bgo?v*0YSj}OGQ+HVC z@IU3$#aJ|zlDCzW3uZLS6P3_~~8%h^y)CzNk`|7KaFlm{$>GG6BjF(ZCh8M#KkK{X--QGu15OT(em<_MG2Ezqbj_ z>jl<~Wrb3>`bA}Ib-%6?eb+8Ry78oEzmW}!zY6}EX~Inkx;v7B%evw2qpftfXR!>9 zz>wHE#{mmj%SQ%y@ry68ZMT9GQumo75$s5K-S8SlQgJ64;{#<@Jo}nIUyd1_oTHLu zk^FTz*{{>aLQeSS0q5I*Qww!^Jt@)E$D@I~!tA+ZDbYxk=bOw_S+Sv{y7G=A|Lt5C z)h$ndPm0OQj+MB>r-S>SjsVq3zI-DDk=T_{6^j0 zM@v^lfN)y$RZn4%V}gF-^EEn;>2M_|t;1|K9y?%$&o3oz@ot;_^wI3|kLcZs)W~8~ z_3?=V1tD#}Y$Kf?3X>?5cGU&na0sLlX3Y26hxsl>Gii|YJ;@}Z$cRs(Nh2QcjAlT4 zjfrb1A6PTG(wSqIYU$@dXu^2t(kt2Z_& zWBil&n??;)-T*#V*Fam6P|MBMoSlsBQw=eVXP4!PCeePPcf%duOk>?Kk+uv;D_GdSLC7=9V4yz1P*EF7E%Dqx%^ zv_N@mfe8@$r&FPVDu9b)2t;)O^TCb(J>nQVmJQ{qofrDv!lRD&g?%afw;4lDf(NF{ zUIjph#I!DZdIfSOe=Z;%0j-qmu-#A(zy$0|1*}7*&kXB{bM2PcorS6cYi?wD>sv1n zr$nZwQV7+R57TL+?18ALH z-(o{pRqZ4@*<{sbb>GAL_k~ux z^sn#E>W!z!>-tHXMj862LS*N(?uPX>zHt;o>4O_DHC0D5tudZ8Y=NU5n3GxhEQNBA zAfNx~Uh?mK?>ckP80mupZt7tCdebVw6!DVNkYR_l01E$jvvaaMVCb@7;jvR2a$Yi8 z5^n~!0pl&McfwI=7>wKJjWe^`Pcz?A)Ql48LsMtwQ1@mZmW?kzgfrE-vFiK&j;2`U;mikE{CN zTBw4NKBiEcZUq z)l8B%;0?8EW=hYt$drh7+M56jRtR#D(RxPvVWR?Nl44grmT~=(u!U^^m$pI*d&#U) z-W#i?a=YH$Qbev#r*>Q4^qulQk>XM7bSnwCfCO-Ncu_3(6l2mV9Ot`CL?*Nxhkt zNd*f5>5cxQA^Vfizw8QfotyVk6eizI#U@rmXaR+?+-r^9zMiViwakbmy{awUWe17@ zUTYlCS%T8ve^f-Y>tp5)1WNy1rH5!Q*Vb&=-I*NKytko~u8-M4pj7?eQgc`5FbcIN zgXCu8j2wDF@2K82Du5d#LVtM}QZz?om^yDoUeVlS&czU#TkEOr9om!X`O5ywoQ67u8yJ9hW zRgatsgQur*%CwSrKYu(Tj*+{Yjyr_6HBhWCxk78pVJEGrfXaM1btxD%=#*(+xb@0% z8Sw{Br9^yBvGQI@or~~XYNlk=+w&=m;;QXRgB2{wv%C?c>q7QBB|d(n6EVt2(R%L^ zc+$*ND5(J+FbURH4Rv%jC=abW07`vX_yL0N&AkA`|bUD;|)9A|R?8acjq#40E08`snM@;;l8U25a75@DI&o?e5} zxIk~Y=S@VvDJQaTjHN3Nh2GY#18Q4c!69{0RGDgp`b^hhaekWkB|0i~&JT}wUjY^% zOHb5ixD$iS;+YRUaPE!re|t61zR>BN&YA)=4+{Vl~! 
zQO^z2yQqNv^#ZzT;k}{T?8elR3_S0HXI!%{fZiO;V7}Q!3))~DnWPXBI5@*n;lPN0;6*NJ(%Qx`ZPt-5Pe>F2!WWRA zR$v!kz`fMe`=KL!sCI5icm$py>Ua;WRRF=5*-?v(Hcdx)k3S6fJ=Z7HxX3t9KJ6is zfQL2aw%(E+!XkdL{%2B_rXP3Ml|4GQvt>sN+(_(6psKyO+MlHoX761p9Q@$QONS4E zQmLo&hS!klgUg8zn;f23e95r?uxtkNDmkVzVE_+zFsgEr`K4?IBjw*+eXPT@_y-Vd} zos^e4(DPM4mB#;Nf^zjbe>w?VY+!v$dIC!y^ndeZe9f(9w!#LgGzH0WUuxfz{Yab2 z;*m8sZS&{WZj=*fcIc*vEt2|APS><~&!IrjgiXEY!?$@+F&CF%Kz=KgGxn++XaEkL zxpv8)W)Tx)SF>S>Ba#ZE_jt$47TX<0GoSGmdR?KlAU68fU+JpmeiM*20#~)1 zR6{(m?74^LW^*9WygMmRbBt}q(IFo+Qh1+CPIt$|y=~vqM|8B}*B{J?O`RGj+e)U) z9lp(uqd)_*?@4@bOjm+FNiKP?5+aU*Bb&DxF(3Q7NM7#RH4j4*rO3QodYzSvdXkMvE;x9mSfw7+XL^V z5-cYl?auEgL&M87P^oDY?UuaTkl&G+)-s9%V<9!Eb+-WLldD+#0bg6AGzdGUZ|yqM zJHh%0G$s+HK%Z2lmOuZ==Y^Ks_5H3OMbxKyK- zBAmHPZddJRhz5}Ie%pH)Dr$cU&YQJ6ghAF=+Kfd~@37>xmlm8YvDz7%|1(3ie_XZb zC2NHesRa12$E$VL!@D9}rhtu3^b*A-(T@rFoeVpuL{jo{wXIaCb4x$p%Q(*^?DR+H zrxq>~ii%a&4^h7AV?fzzwpkW-Hln!u?(k>>OzY3WfElVwy80Xb|P7mChVL^L>G@$0OqFLxCj)E z4s6F;^%HY$5f6JTYPU9PrT%%BZu{~xM9uBNei{UAwA$cQY~Sn?v|EbsJJB1@(j3bg z1e%OFHkQ@KO-^VOyo!Rbe`x@Nk3c_{D=mfV^Rn~`?V5pZsk6ts?>8dB-DIBoEwh>Z z%TlRVN>Z5s1DHQ88nEGyFz~w13K|te!;Z63V!!6O_Dge3+;UE7?LyT&g}Wfk`hD=v zzRyc5o-mf6JW#7As(b!{ClvMq&yatDOo@DgCdh~b6W2!Ztbm{@e+RVN`4@9gizZZZ z8@;6H4}|E8Pa4YpdKXTNEs!kW6A);4e*l3}K27ZfU$y38<0^b~*1EDBdZhjS`3g8F z2MmI7vl9*=vtkI+;8X!k1Al$`YpkPAQ``)Qb8u+5-v>(}4;tiv{uv4gmalS>^}os{08{55y3bvd_^5}MvpHrBIM`u`!=Hh!vJM6{IzcJ9|%_I zR_!i46@t=j`3kgU@bJ3P z(b5J}lTv@97hv`RtX1J7T}hZAI?S$L9A#4Nz2DD*w-&$yUH}6Q^2Y}rW&*sUe*3@X z4_Mtl%f@*HSp5ILL&RkNsfaW0}8l|L21+4dmd=}zR-0gCz2DbYQw$r-Ycr;wq0Gw4$ z`Ts5qU8o|;U~MG&l6Nr8 zzqSs8DE$4S&5P(h2AV9p0c?y6!2p5v7hWkjlcI;U#y?F)un3g{$#tge9@uBibri>@ zS;5?oL~q~$1u1_`tPz8YEUC!+e_l116{vAtwBA2aNP?MhW!z$LPIZw;yzl+@^?wKbdnJ(LmlVeg%WDsm{VBCs#HwQaGoJsg zvvG{T|12u@?@=SD+xTZaITY&D)NOWZ$x6s3SbhFRO^=2aGbCN>|GbuI<*5H(Q^CQN z8}KUmkDJfYmeVc9{ii9G&Mf7CjGt6Nwd$Z+Z4j(cr9_U{#8V$s+f6XenXi_{-s{w= zVx6Wk^eY~1cm)SsJRGmm4$60D%73PeB-V9m$&VjW13%%G1Mgi`!B7>joyXJ42kZj= z&3Ki=coiL8r^k2XP9fkJpz=cN*JzAt14Ko2mdKC+gK{kz#^Bn9W6^vyCa7D$;rI;# zLd~qlcTl37e*(lNP31rv4hnl`6p|~t4yTXsoKw3x(R1qX+I{nb!7+9vB{8shqZYK~ z*y~LB`Sr*Q1k3v1<|Z_kYeL(m^%E!o!}j&_^z@5-yb7A^G=vD0KhnY1fdDzAO69~< z2B|n}V#YNrn)Q237*Us|J)r+o^vl2)0 z*V#b*(EGic(=m@`A*YiNO*nM_jSUn@n^vR&)P8TxpIb$`?@elQAq7q0duYS0=b zJ0ARM6`4*$`+zm$o*(A2MK$&(XMsY2r zMt|WLf(TM3iY7C-SjXShu*kj#sm|+>3pH3mjIZK!#iC4#XYTpsp~D+;n7AyKbF=0-EkAl;* zfy%uSZb#I?x>5$iRKUG(e6Of}2ozL0^JuoA+Rj1RvHSQJn@Q}+Pi$DZd+`+Mq3WcQ^g=~=u=v-fL|Lm^V?@)>Y3V8bygj0T`uX^@+7(V7ej^($p?MsEE5SVEX{ zyviD5{+K$LfGI!L=F77ZumKKSoCyX&;}_L-_KLy9X!+A^bPu$WJ<&NOJ6!NsK|&#< z@)Hnxcul#j`nj~1^&mY}ld^>p+pf4aF`)UPBy_47`xPQ1!o z)AKSAqrV@bwshA$T*&|l>9MlfL2OeEDhp;+lT*pw5b2s^;;3-^S|c;z|t|x_gYKhNr@TwHLJKuZ#xf-gwEcMNl1*p z{_*Oj!Iu}AI6t1V0eIfp{oU6&6DoKeN8e3yG1y`D%V|Np7o9jfrq%LreD0_#LxU zN_=LhETL;N5DOcq9ptc>Un&1or<*4co}MY+tD%kR4f6~n3E^*t2o$zderOALg7Q+@ z0?l|8Pca;J_`TR>p@Im&Sptt>L2f}xKfPZNOqaWq&{Jy1(Sp{BAYs5S+Ca1ZKr}6( zH*gLOnFxl6k(%IqYiL-_?CSS)Y?>iUD7wE3Tm+YG1E=f z&-SlZU7i||9e>y~vyQuEOo|mkx|$4ZR*)=W!N`dW`HIfHuHRIw8&w!3KC6+S`%-Bu$9GZTJ>y%6>U}sh+n(H>pMTq zd!svIuq)ZvK{q&E0-X3Y|E}3Bhe*8#I#;|tz(11g)e*y$KNrjRllj+`)6i3W_ATe2 z+M8WU5&KX%1iSsXZUc2AtEhMng{=g9`vVcGnY9U6>7XqSwsS0cw{daAQGlr-Bu7*?ywXC;1Ld>6F7JI| zoPOv?=%9qcp8FzIC0Rj0;bGQ`uM)dq?Qp}i1IZMgE`NGUtQ5j}i8?luUFiQEViSCL zzS9AiHU}6~eP|O>+Wz*fW(+Uf{4Rke{O7#*1LRwL3&^1azjx;<{3>7=O};gl44=Wm zIq{($Fn$ab@EfPr@lkN`b$P?3Zbj7`tu(dd@K#gju2H_4=G@JSV>#~=Ty`xhQX-rK z!b9yWnRQd-p(hbMHPy#=sYJjGhgJP=hDIzp(9%a!9|AbjONDuyVM;8$3qtJUhv(wy zrSg2QWc)GGnLbw4_HeH76qcY^I9(;rdVN2%@atT_hJ;j)8yUT+$U;aDnPWhL)>1T} zS!_M&cf130Vm3sAL>M?+75^1Eo^}~h80(P 
z4Yq>vSwj6B%EY8DvukEIB0q+Bse&_tUC1g6=R#BdA!Xj*DzvE-`SB!sVpG~Gp8?eT zFJ1m=-aAv6fb3dufl2YtIUkw+k0XC=n*V>hYgMgPw=LTR7krx3np5PJmwksK+slgw zu)=D9qYO0!yLqK^d7SqGc5O0IK$f)3iIboDESj_eP*HWKZEqKpJIn&78jhk6WWy`$ zI2GgB`#J{sX8y9h{~)FAX`}@}UmUvGKON&%tY!JJ-IB6uT6QDP#!Lo4-%GzIpX0Tj z#jDS{l$j|#8TVNq3!g34f!d4jp_j;)OLT@oGqJ)iJ|g6D^K$?!Z9+89bI)xoh8sH5>|oH`FpI zKtH(Gf7PWuKO9g)c(d?*=Y_jkoQPkb81;v+G|;eyM?C&2xCm2$Cd*jYC!||WjD_DW z!_ZSHgaU&O8~(lCmG|~E+A>@1fKYCu#`n-@s8UpO`0Zg(2t`KzAJv|!Wpg!wno%2Ev{U-Ge^nD>d0 z^SMUx0I_0nWK`Ti&Be8(-SkuKj_dvR8&{Q`mCb;&@r(1hK_Wl@4#UH6caJNLW=DU7t$7W+p^txG6f5yK!?LN<&tBMkJPSbVumfa?wXX|kfz59;w zfCt4qZ!?*WL9h?ol$J6`6&Plu)0x_BSywdM8W=YbJdv#$ofk4=eA+v^$9?Ze%_uYl zU0-|nJecMjv0^RyQ$9|0;{O=7fhfc7*;!kTC)M7ET737A68WKpZ$i2je4U;lZtq1@vzR{QRaJL-8Of8&@N8O7>)aCCA9NS$VC2d@~N zGTQyPQH&s}(r-v<5V)$Orad{|m0wr>9RIOWn3(BLyBYjPh-23g*NY3GyN6(>aYkB1 z@UWN*-JdNh-m;a;eHMMp8Uatd|9ByG6p(FgfDhR;X5;k%mlC!YUMs!tZXxx)FWQ`2 zyrn*WZr15iN!vjyyI_ww3pbEwiYD;tMLW5hIq%Zds((yjcSo8p;&2E1WXpG0sVVnUp8>qsV9rw{f=w|U5QA&?rqptuneP9n?(^z08AoPg5!!h{+Joga%R8+SJHU_t|Ii^?) z@HYs@jDa1jfs@Z|ok<%mf#81hr1-3glm)stWJllFL4i8+hskQMs{@6UG_6p+8Xx2? ztR-D1SP*)!_JHG9f#BtOU}$dyoAkx+fnaRA%;!y`0pc|DB8=9Q|D=F_1=N$jj}}N) zYL@OCu|V-a!XCOE67Kx3opJMSf^d`P0VW(&=-JG(Qtn+>&+#>ReC%QTpz~o?$<&j( z<0{Jp<#JJA(*%r82!khi@W|}xJR{I_g=@R@hDKdG`<=m{GbOCvZ^Ddnwm_n zs4DvVj8Gi-k8#rkj9>K1fNXW^YhR*Te$?AEj?RW=_Z#ni8LTUQiH*r@In;n_U+?!H zRH7tjqTVcgU1o@>T3WdUqdH}6@n6K)MG;gylK{H$%YY-7`B3CRV#%2FjLj@$r`(~v zoj{ArSe;nDazZT>9zhDOSOe2QOVW@CL&9s8qWF39qR+K-*L8YeGK z0Ixv-SYOSqbeqa}N+C{J*`~3+RhRw2weK(Zmj83DcFlZw zlLdzl261ArGz=ZHKiG(8@COU3X+rr@B2B?z)S;bEIgbFB4lxYT7qHk{7S0&)ebI@$ zSm2^s{aU!}H1_Hy^NEak3>q5QK@0x|M_1Jo+igjk{|98=F&N41Dg=t(j18I60Xh8N zT~xOb6|CZ?haT!uJgD-Py7cFMnvYlx*UYxDzrM?<93Ay6!p+yYtAe|r1*64jfxUT| zt7T0e7mrQ4-^4?cJ~Rx>4(sFIk*#-hsF$VKl6k^^NX1%ZyFh#afGm>p$p6;F(91bO zQd09vco>Rwh}kqA2Ye8pjS`-2kf%Kyp#IlQq%_FZ5Bxn~?Hf(R$|lmAiRav(f0_Z} z8kyCXaIx`D?y+hn0-bSso>?7|Y)|Jr3mp`0)PM2hW{iJV+b$wZg8c8@J{N=TEaRC~ z@&a$;%4%HTv4LSed)dkfN9p2CUQlVnTcA2PPfAU;w-X-4*LlS|tpnTG4-I`y5m8&+ z;t_*nI!s9wb@kp?-v0%ra(VMl%WR>%Sf|M8Y6v!UO%u-@2})5SE4>rvM(%F7~6< z>&nRu-_NjkNG}`wUgW<>>i_&hMV-5>FNo#I<=V%e@l~QU?#*zzts<-Y;m1OP+QSu| z)SF8*&rKlC^>5vr*h7!AoGZ6okn$vei;8ny4Qnx4yGk;W$5zN+eai|s#mDd3T} zX8k*?oFSwEb5ClZJl^mXWzjQJ!k6-u&lUR}l5n8H%hEzdYv>+f_pq>BJaeu+5#z2|LMf3 zbswwW0c#8JGZpT1_j1xlzv=hRoaKwG0PxGGd4AgFLID&}aZGS?I%N<&T{RhCiDU*{ zB65g^`cugFgqTDl6L>q@>nZC~FHBpA%&G6&A0GKj8v{$_lD3P!`|@qkealu)rR1-+ zN{KWkvc!UApHeda{IAMI} zQ8BlgnW4xZ-N3&7@?tfwYT!fHDCrT%zI}6GGUKS&8Xopo?Y@u^kU6Ef-{iy5At~p6 zHas+x^aE%ENQkf@(CK||>@NA`4Y)KvboUo)bELJcGB@riv(bYQiG<}u4sK9gY!M*W z?pD|{IOO(_IjHTnB2cI_sfg>AtLFqk6&WLJP>0(5Y6!_?@`<;3Mgj<^NG`qx%Qky={O4jf@*jfsK-8)7OaOtWGG0Zx^`F;+@(jQxh$FGFIkBwSJ=)f0f$XdM5R9-jjq zCB=Bpv7sQ4iJc>kyjdL`h8#{ChQHcImzai61f2w$1p38zK!Bg~Qa1mjI8~Lt)_73@ zLbRd{Ta=w@O|(Vz{CO=rnh3Z|9m=*$GNs$UBA;_51-0F>G;f}NuN>gaBZhdNKRx$!oI$fXs~mXNp$=u}`$rD448e z+lxoOdHg!?3n_zM|6g)|-3o8vDG2k|CzLN(T>ox9uG9Bp@qhy@8m_BInlN_t{&HF` z#tw*!IG`aT5|XcTpx~=xWY4oOp5;&uh8OTo*=RRhuz!QFix23RLH+(Cy;$!Lv52z; z+lTu@Sogy^)7pvA$T`Qox9Fliq#Gupgu-%aAF%H+n9RQV^(GCy#&_hatWde|s;%vG zy01fdO_7>C|IfrAo}D`6jy&?YOy6st()EP$A%gMDN52kVW_h`_ z;ADa{k8$I!SnrLSO##nZRR_MXj0|ni<9L$uvmqgQ*?VctR_;a(M^0`q_xB%oMdtuLJGn<$JE6DlXdB~& zIpBoMYsJ)@8*<;L&d6&_`Z4KIku7tLIT*0sEZFo|vyoiXIruv|7? 
z(ULXLxR-A#nidSJ3Y_mf%?eVS-XFxh{?ZXZj(%I^v{h!_xp{$~DhuN1_@TsX5Oz?MO)#a|>anR) z0X~Lji{F6kH`MYOZ8B;P$ONkmX>;}80QQGYs}i>nkJZY2MkN0X^}aL9XHLd^K`Zn> zxpvQ+yuE*G#z0O~SJ%YEC0;AJ*a$fF$5~MQqw-k$exC-c)3H#C;s2aovpE5)M?wQ9 zZRV)U6ohbhT-)oXflx$O&c(C`J87;&rl_xjUC7$^XrXQZ6J46d%Bu#RLuw<^8TD3H z-3hL$Lig;XbIO@%xmx0 ztaLCsh=cwQXC&J&*3 z#dYaF;M}#Z+Kj0SUtrT?b0?k#hXwOgckfc!YQ2E$SM>53&B^~u4$ZeKo?;EWg4aAH zRZSKnm1Os}!!#{>_^Ed}bc+KzLn_(S5)rD3)xCC{Pp(DT&l` z|Etj+uMU1Z#djeS<f@dUtwm%y^0WpHDw!(qAlVG0>`JAhK-Bp24TWp5M}JkTtb^WkXpVHM<7G zA*G{&tzpT#LiG(zk0X;3q%*4C+(_9P4wV=_#@xDuxVyQUTL(eySA%zKUrI-CypqSC z42zMNb2 zr(k6{AhL`vd^M9QT$vl6&;8DqR`U@?4*dq-qgbK#*1xbMEUwY2M$i|^NI4K%nO|o8 zH?zP<0aYAJLWDF~{bW52#dyYTBXR=ihfQ$i>DaN&Pq6eHELr*BUgjDVl=&%4M|&SI z_c`l+pObMgD|1kTD*1P7kii1FEf>ftYmE%(G}<6St!C~(4_nbzS57a3lX5shsIRnt zJEsJ>T)<8n{&!Se&M)*4pJtfn&@##_hkI^ujm;CXq;KMF6m;v!Q7{q@XS`-kSj$^c zRMgIUJZ>5(?w~3zkp2_SP_)2x=$8K(h8=a$`LDwx94_d*QZxt)V>n7F@9|^Xh7p#G zVz^zCeU7sKhXA)N(zn;n65ji>ByYv$RV1+H%y{F~3fK1+Q7$Hfy84Bv!D%U5348E)#{(_uPypG2Osn&pNTdZc$c@bixBT`8scs3GbDi6;CM zCiIX=oW>%EY|d=x;51G$DUdkoQG6??v{GlQ;{lX@&hN;Y&gT0pNpF$ZTb)iB4P~nV z`!+_a+LaShgoxsI3vjR70M2`H_B^NTP&_UwbXeH8nj$nIqk?vMLzj zGJ_3=jv`txVaZ|0MrM7#asN91&+d7sDbtN>z}O1Pe>UR4Xdilk^kU2LBV{-uUSRHb zOsl{MFk`N{Gd6V0eAZy2B~0jcv3?_)f4H}>yZ_y(Gw0Rrsqsvjr~Bt!XNMWq#!vc8 z7dtAYKtRL;(==Z>+S2C`kXOu&NVGL>ADVAYJ+%j%0=)JH8$po}Et@z(?FohjZ2I_g zL3oldJ$UIZ*9{`M9)De_UyJ~t3EpwuKVUZ!@f*sYOdb9{#f8b6&uaT;1+z9H1V`ZQ z5jIVux2#UE&;TY5mCV?wD_D%CG(rSGQ?+0BzU-pUx)^OILX!x6zagk$0ntqfggUzk zn2~gHmRy9>t34vJz-HhVJr1xZ=C@y-v=8EJ1BWHe%{jm~4#e0xj%0qh)1BMPx@SmM znMqxe%Ft)ya;C)UWW?f05>^|)jWp_v0J=%y%`*`%$>AiT^M>^@>eKGd3aRDl@4I^| zEcN|}l(Gf<`x9_tkQ5|a)!+X^Y({Lch5BZa)C78#bDSphv07tyCG+`HOVRyN2Gy=E zz`K4+TQXp!sTJjl+{FQxIHY^hugx4)Y1`ePSx}3NVC8o}F8vG)^sF!~cT+y*PVea9 zXx8(|dBSfPjjXcTuP}wKv@1xMc#q4tejn-Z_<|;1Z%}2dE|chSdR`f_%2@pds3f0V zd+X?m@isj}hAnUe-(9|3e9`*t|9Itg=}QM!{N4H8<=S_1PGzs#;oH=oE#VTy-96;T z%|F&mT(LxQ@_t3LS>*VglEUi~ z2VcLH8QW$Jz~wn4Gz8?5eAxV5b&4A_Z+o-;`_3#df%*ASq4RcR*v-1BQx(A!*+<(i zhI_8l1jqs#F#AFMT1(#kc1WUBGIt%de6^){zZf&J%6|0sHM8EiKB;~jNXdJD_)yV3 zaN_M{fq4!7kFbxeG+b#z9IR?Ou<5MRj*0 zx<%tQP7)Y=6%RMx&;bskz{PoQpFAo6p>Pk63ut9?pZ4y)CZDXvAX5d_(0bl;ZoykXPc19pa{4ZiVy zi~MUIr+7=j<(ZuEz9k8^tY|rErqsnn9MSM~))bI_vo$pd0NTn+yyLO;kZZ{ywz>{6 z_a_6*7R*-3qZtprdrSn=|NmUb3AM)+E278DBY~F$rG}Jj9d-&dZf_~8*x163mc15F ze<;tEf|PMN4`=GqNopJ(@*&)&)EXQM$bcq~ogYEdlV<$?i7tN=Bd3S0W< zB4hb8?Q;LW-9T>TAPktGp+7(w?i2cn=USSZ>N9gUK4z+SH_VDp4YAN4#^t!Az_$%< zlJ3S)n{AbivbJ#4&NnE-j3?N7L2s#C)4TQ^!KwLB>*LFZ-bw^<7_7b@?nnRC)S?*xs{fr@-M81T3rN1m1hC5wmZM%AyVIAmck_9+TFbjzkxQCs5k z^RtcZQL?h=Y%f4sD|c)wvtD84a8E^_;B4L>z=6@gIP_?~f@5Vpbh~y~>^3U^dWT=!53%H`W!~MNZTD5NHad0K#9wiDy&8c*~$FiKNY6tyJ_SZsv!` z)&6|R4j($KcCxx(8#?Oo_&jP+Qb4eZ8EBEwZ4RH_R9DOqvAEbK`h&L^)NR$(r23;> z!}mdsCJGge8*N0P_YbhM`J?&frcc0Ceshg053uRg1n0J2^%DowfVj@3FbXx#5>Xn8 z>1tTeH~E2=RcF-7LwH_l>*{>)CB^A2`pm4JoQd}?rN^p*0s9zg6^bsHj0hf7Wut3w zGR!Fn-v2By2ayLyIXFq%#J}Sx`HsGj~+Wwq9K)Z42 zgty(!*=z|1?S|9TNxZkwaImM$xqte3ujiTx#C@SqH})I)9P7bqH`A$#jJMPv`3b%3 zh)3-aS+_l$2Rn|rE&XdcW3tPTuNax^_j{YJvE*o5+>7ATAf`XV`fTU_)BkytF8fwh1%t<^P6C~*;hjE-3Y#yW7u3%quNHu#f14O zdr2jQ=-1}Pm?<;whxhAum(W3*A3U;$`tAOleG_NqT8AIWY68*5DGB4Ts>f%???dT0 z6N4NoE02SyYz@uSbk|S2`^YGz!(sS{xkV-PmdD4$+U+1MC-$FPgUl}0CzA4|MrO?^ z7#P{wHy0P#c7IQ>2&2LgbAD(53f%4f>Ir3cG?zzKRc+2mjBYw$yG3GVbh;65t$%|l zH45seTx|z12fU<$N7d`19gCjGhifr&%?yKe61?!<`41WJf6nLeaTXfK-P-T`!)UF| z@-pey3Du`&N3~8A4{8g6o)kJ{_IxaMN>?-T_$mmhj!@nN*V+yYK6OOi_H6mxuLPK( z#KoUykE1V_RE7C?B%lNsnH=n^N9Ebb#37w2Wj{CC81V$p2pS(lthG@!{==$99-?`MW&iuJ7n3#mCT}W4! 
z!cp(D#8zFIN6_;poT}3k-4%tB_@1BMc>~?GyvvZLNP(?7Xjgj}oF>7g9CjJ)0pBG1 z{{4Z&`8+;6(4*{pyr*Z9twWLqK)I_qSqE175Ndg@Eb^#;{qCehB3x(V6@AqE6=n?$ z+P8C5S*k4?7xCxqZZ3;53J)dHol9WL173EtDkdQ=V0L(%(^9|_(9w5L6XD zbq2cL{1R=<4~My(Rm1)1WJH|E8*qp3K4yOJWLOLj~V9zFrr^#&GkvwIHk z`#6V|yv~4ANsuQhcBXEfE~YbOcIShVlIEj)UO;!M^ir=iLws%gq?>4Jd#_L3f}FRI zKm`jLZK~&1@OekTpFg@5=@;7~d4jZeH#*m{N(Y@pc9Nn}caV~Div_-gH@M`_(JpWO zt(NCElJ69rPciPnMaED?4Gy9i;s3N^x3f5=^Z-+wsp)|&E<2q*O_&mnH>FY@+%6OJ zS{Cas2TFwJs4zfprOrT{Z%3(+sHU8}7RQ%KnXQ8&E~#NlL$=~72;3~yae9M2kI0I) z8SNuEvOpxun{BMa(ur}oe&&Wj7~eQh?#V^j8B3=&MR;lpkUe? z*U#f^NDfaU+=o-;da`&DKiF&!(qrqAk=1wei7G5MGuZ2^YAI-v^YW9kBQ$wz4^xV2 zAkx{NO;ga@kzgJEJEzaVc;F@%Q+`C9vsErbvS-5)=j|r=+=+D$c2q0H7rJ1V?rdAL zXml&A`j3|JUZ(sxr{`P+m$+LXm!KgVF7x?wSy;hpEjw>1FV7Gm1QLLlvHd=(#Y{m* z9{&(p;ZqNJy2Xmxn)+%OX7lc4mjN(+SS&PYeLhqgEMxo_FE*M)qgUrqrzfT--69a_ zG&qRbKr#T47c}PbB7) zHR>Ie0zf2$=PCkiWy@$Ozh_rzp8CQ%+uvBC`3a@ZYgw|M9ur=8(d41ctQSnY$8h=l z!*-tmLg&&ftG?^H^&sRHykU#~I(ehH|KPZl|L6=|hM(x{2@#g8iKn!LRhjD`GcX=1 zqQ|PuS{mZ!^+G&*pKNagg^sl*fS_eO@9KZ?-LpSFJ@L1ivw}%I12n&?i0z60cBR!y za2{WAUpJ4wv8g+fLn)i%D?nE(NJdAB;%19>!|&|IipPeu{3KFG1bw&2|Ku~AKIKt8ix8Gbd0ACT zYE}a=&o7Eegrb>Zld7`$*R#yO3Zt|ickb%~?88!kUy>X(f`dwOHZH_KZ~~RhOm@o= zebu2SW})34^GqEjqmXNM;>Xv68k8#Az*~BU_(k2Rvp^#a1~1p1orlNTgL1DSvRU)~ zy%e2%WT(&BAlhi_s&F!E{U!1wJKv%AP4C`bFUrpG^q8=P%`XJogebt!aW#?$0m4_gpVJLhIsnRn?r$5wn|J z{y^Iqz-b{1jKsyXYi|Cq67YPAv$c3@oiCr;$krQGGMQ0KrpXu|uQvr^ym;Aljvnc% zakwS*JqdZ#j+4CBo7B|X558ON-IVeCE%@!4mZ?`$2 z`t>ENmoufoyLh4k^Qihv=K^*&x4-Ky>sR(~mT(vr@ELwP7E~h~ezMNI+}}g!Zmq8vk;0(tmOcI4yt(6s32d*m5;r~lA%);9gEqM%s)SI%VMif{ny4hw(u*K zV+4=KMzB8~NmjQG#sY3P`DU?&mvQZ3gWr+$4NXNgu!+)CA=ot}f4dz{2LvsqHq;v2 zL)7sNfg?aQ&voFWWd@B}&7KR;47&GvF;^V&cut$TiMb^w;S4mibx)sdbw<0@CUgI+ z?mYLE-@9ae?;W$4!I7t2DFyE8Y8Cfz0@TGh_2uL+USe+gXn{B$M}kFW>+ApxwC=b# zBSk4ApWjw+k9)z6tNGWA%ab~|>dQ=dJ*D5Lw#}b&hV#x%6}jHyaqjvDQa{*Mj^G#6 zU25v%_dtQuN1J?kg`+o$ibqc~!dvfaC!2vP{No(qx#ROb;U1%y_PuUeC}lmxo2~)8 z#W-&ysGOni$T^;e5T^RP!7~uTAQ}M8eURta=Yvj(?#XV}rSs>+{rlU@ME0Ue(cdcT z=UqO*gf)fHhJa7U;#D6?e99b*j9B@8 zH8&AmJgY^KJV?1Uj>iSS_4Z@`8bjl&K(dt}w{?frGwj_^Lpe7vn@fquRY7;rl@s7V zaipp|v%1GcBT+b;7wF;H*XOl>vCJW$mT&N~`y0eY7z}Yzeo$=UpaCHJr(3?B*_bKh zSg97FYg>6dlLtk|%8MjRPr%^kWSx^>+N|1o0gob3FA{70(kz{X*4uwtYy|N$ENZ%b z3-c>5i3(9cO|;5r&gZ<2UBT= z*?M5xl`PH~zN^pkSDUJuspVg2Z> z6uGr<2{+aFM8G?rTY4Ofg2W()rlgwrrG}+4R(ua1l0qU6?~%EGFfBORUjBQz(=NBA zwfmpI^L=_SPUVaFs`2Ui^)Q4&?m2Zd#j4=bm$$e}CG}9qUc;OF9zMP0l-D4WfsV>* z6JBB5hVWKPVDQK!AxWS^+D`wZ9GdR=QX$$Kkm~qm=BV2YHYH6@IvVSO(XZZM>N8%xN!llQRL-yc75(#j z+(8p$UFGJs$d|sk>m{D+`x#*eeZd5V0EhJtqb$H;*&kz!}=5j$|>xL{x z$2G`RS#q?NHbAf%Xfnmp1X&JOtb5(cQhw?bM*g9t`_6l_p(`^!ysupQs5G ziqTISh361Sz{wn1S$Mlb-xOKs9%RE8FoWhY+CqlNJtr&d%2+mvw{v+2Kv!>;L z%PPfHv)lBOWXSxQY*-|ge_dp;m_g>g^mvfD!sshD zX?;ZgXRUqR-^+d6t1bnA5l(t=--aewVYU`LYk2)2nz! zq8se2j7>G2TiSYKP0km{H**M^-*_WBc9g9QHF`Jxb%UGp z3Q}BUe`xdo{Q=|gT0{klU7THG*f*_lQ2!T$th7tzqv6H%WwHfLFfGNhHV;BLEt@1L zdy}sCLstMoyu^*{J+b`*NvVL&(O;fIMk{`|ofP-yjDpprbtj8M?D2UzpWL^#tB(sG zN+rfx3c94<=ji|eU#oJTGJNm4z@Np`x#0LkZ0A%gE%N4u)@YJ|rlge2hg(CEOuIPS zb@wB`_ldi8mgWiV?Z_EZcJv`_6ijjWn(A<^z&Flg9dxeQqnEPM0-MvXY>k=mt9;IX zP1?yLIk+@92#(ot$q=BrmfrZ_W6~9i#MORV(2y0xxLd4my&ci=#@h5xid^94fVwK_ zkEzVksL2`2kwh!?NV0|6uuFG9TZw#{mlb>pE7B%cP}nZIRla2hUxY1g3a>U zSyjd28A~hpbIHS>&QC)F|MS+i&gVl2kFvNSLvHQ;UKHdW7q7LduA+PP&?xtBwcm<0 zz3H)g_RKm(r(t+#*O6C}nduPdZh3u;D1!kN!pp-i6?V;#`cBJAE^4l<2~J8%gu+LY zgbGV9=6lXf^CRhfM$6kXIW{LL?zlqKb$u-N_C2fjdrEqS-&0W~{aXq{3Erxpd(;(% z8}DIK4_x5P^^9P|R=B?5sg3{4=R*li8bAf3a3~`qh^ZNOwCzKFG+;H{56EH?+;84? 
zx12!eb+1d)^mT&@_|vMg?zDuG1L>MiYc@JW(Q#@eLLBgMwPw^-+1Q&srFmSZEfp?e zv6A)^&U4k}bDwS7z-sS_a`sYsIS=*;Yivs4NU(n1%rw!9}q zipl4V$_f?=LQ|1cy0<$G7?}uD#zA`(8R_YN>ruZ1OR2@EqfftXP@bc5+hS*|O1iI~ z{u@(I4@k}J`+dx-`LI$Sx)cl7`=U!Y`gC*H$87YQKF#&whWCfD--f`xT_pIV!GL)6 zgL5PQSbbuEoU8M8qKRS!m1dJC?NKh#G7%+542a>cnD@b?iPy8xs2zpJ9pHj8QsCPi zqq(8LSF@7ucROI`m8-S>FjziH&-XkN&=^Tm=WwGYq|t5XG>!Wsa(H5?kd?o+C5x5o z;jm6l?$xqAjK=X={AWwv=weP@Urk$cSM5TyqKj$zmHng!Yb3!|^0&HE>w~g7Ucn7b znZjw&Zq{U#Tr~0C>byP`w~YO;`la`KOjH#tzbF1++Cud#3`#Q$D&ivYs^i&`$Hc$S zx<~Q6TMz7n1txka=6}?$GIr$B*2PF7M^l;`G=_qhnSC4 z*nfNG4Q7B~g$yP;_y|sGTiI(})vSU;U1m#{JwohRjdIoafoAX|Jte`+9$L5i(`R5u z;zg}$a&Q(8U(DrfeBd)$mT-)%cfnr!l3wpVXNX|8|Nc>b#n5>D6HB}^XG-c5d3TOR;RydtdI zGI~OMY5BYIH~8cVs`N9^xa0*9`hM3T1?vAE47{F^RKco>Nw@>%JfCw{2eO!8trVI4 zWx!dN&@@fwFh|YPSDk)fJ#n_XArjnH+3nI(!-km$t|}LkukSq5FH4NheJfGpKLO`1 zdM_?dD<~*Co^a${xoq1vuH0f&Gu*e$#bIAHs4laye|!Ay<1Q1f$F>F z-Z!AtEg@T74MjktuQv#1xSkzETs0=&V3ueB!E{z&HiYYuWIAklmJWFKAe~H2O=WAy z{=UKP8iAR6NWb8baCOHC;s4R9bIF%9`)h_Gy3FU28cTyx(v1QE>h(x*WQu2m!vHht zVtwNTXn^@$KBqw}uQIqqy27om!@{FXXemzbo#+^_k&?VtMu3Vr6plPvdJ3Zommrm& zG#r&M>ayMO%>Sso{RRsXsMU=gd6;&^^ATm4Mw#L1E1Hv6wxEh-G$fx^TMZ0%K^X>jc2BovPtU2j_E)nLfb|oD7 zxZNOubEb3+dG0oPu5M>^-bc=lM>Y*%ay?V?=+0u=D1uSiVo9o|!?^&}M@`2JKlC*d z8cJ1*$q9)~&Zkemresp3t6gr!eXt!X zYSrBrNG}vAz3A{Ri_K!{`0H)?fgtABL2xy|Q$sL~BLOn~!BY609zdXJ6sHCxfTQ_* zMVOiC&ktBWNxK$IlZ%om=3|*r_uLtzk-x*|(aZ`&kqE+dnxXh6IT};RwsE3ITjyMq zKH2GY=K@wwO9){JAgvLJqdy|0@2(kBnoNX1qzVcpqpAadUM>odEF$t0@4+;zItIUo zU0wo9oS}3$k5#*>$P11h`3)gGAoHh6)s$d=lp>b%->W}w8qO7s?^D0gBLg@>FWr>g zE+ML|fbiozT&k;4C!ER^4iNOdn(8UGx|)J!Ih`U=lRz>KI`DL)*CzC8Nq9qb6EHWwbvalhgeOd znet$7g9ZnfJvEOx&yL#3@WmFj)n$hq-^mZoOjhd767cK1LbNf>RF@YcogNW^mvS6B zU$l-Tkh_`jrrFQzdV28%`s=#(Sp1cJ1lT#U~eE+_kmb z>o%n~DF1!?{#0xH&U4;&g~A`%vx5UG6TG`01}p*Nv4ZUObTto{#^P4#??vR`90z zIl`OYCvKqHE;`d)iV7!t3g{0jxMQ+6=bKOniT~WYXT#`m;P8=|n5rNn&p+%63T5C^ zsb<78H85a$bnoDvRQ%2PaZ}I{PrbRW!To75^_?0sb$k5B+Lr$nvfA+*M^CdZW8BO< zQn(x#jLc5Xx0rbWU(Ux%3%|pcxtu#H32lO)!M5jI!UOy~26NWpmQvUz57%(xb;W_&#PH>B{#rQ63pNZ=LqV+Z5t z5GeC5ShKH~5Kqyju^hyci8jRJb){kuRmIUMhw`fPVKVW7}B2pTdf)Gr$i6Re0W!zO}Qy*r! zwK5QG^xWhCh|1ik60mx8Rz*0!=SHL&zDoFSm$v8x1uee$gKx%)3)^djs}nE@AQT}{ zH96C9oHzVw&IISHbanJoN44Hm>D0&K7RpqMvH^ZSu>eflp-7>`-gwNT-|d=9zWws~ z@DIEcjp-)}yiEl%M%h8KXzSo1Ag<(HzhWGH#3BApf!9@ITVbxA@TS_@Ea7wB?EUdR z6uXG{%bh!n%t>dnSkTaS?5j+qFBxr@K%HlK`LEL2c>}ok8&Bn|%B! zZrZY4UyrFw<5lIv?Lzk7&cOOOT%GhlOO)48&~b+6Q{GLwM1j?o>AN2?^0pt2!Xh4+ zgE4DFI{2qJY;#4-T#bZ?DH$J3lwHQ4$!FG^O!hRC`sv3`-JE)6R4@7!c0|9R*p33v z9m?SAH%Fn-z8vU3Z?H6Aco+!v_hVfkfMZN=aOoV0eGM(isemGqqM~Wmrxi?D)=9V? 
zuwg9iYWZbUra(qX(3Di%0o;6g=zKLDlC&M2@yH@pM+_!|R11Rab1^O^Lol+-YAf%w zYsUfKri4}!7-DexN1lf2{vh-+Zwu=#Zik4I$Z(p(5i1ie_3 zlL3QwIckibV9>>lP@zjQsIcsGgoQ?h!L+}_%WWtE_3RkB4;MID4cPE8>wVRY#NR35 zl&tYOvkza^eDfGXDb(B%os}0`h#*AJ1>wUXtwPB-sK0_1&i}v2u%I0*+aY^KN_SC( zU)MQ0be`)g|FQ>1y}-}1C)ewP{x8(AMMb*^!NK36|GF)ZiSDHWj$R($lrEs%jsbo5= zy6n=Dp;Yq!r8pT#c2@yYoOIxb16FruuiU=MD}>NS>$|P~=y@D=8`=IDPz)DYn6Zdc zrZS@qew7h`dPdE5KRSIXUOQhn(rTl)CMLG}<~zf8!EySW{MUUKK~l!p_fN=_7FsYw z(~|%MoQlc55`9w<1yVr;fg&t5@SIHagdS_B)gIbY0Wa!OfAV?ub(I-{Cg==zX`w+a z(zL#;>K_Y4yzTdDs}Iavs{NLf-#AvniMc3(LYT!WqhIh{)DLSU;46N;vSb7;b-cx8 zZn2jsSrrF_QB>G|IrIU7xSeefRvfU|!7FVCLlJy_>WV3Eh0<3;>alQ^)eX7xRuOC# z(aE_s9_yW~(|P$FqKIJ>&sU4#_W-KRQQpZ=mKj^8>M=-Aazx+S{5S~jPnsju5q&yl zht0G;T&55WBM(M)Y05iOEV*VynSajM#>EvkR-9kVh!6=8LeZH2R&~V`mrA9L_{9Ut znT7EaB&>mb(TU%V@2`sXe-+WOwsfYSEu)c@69>#86yBDv40pNU#4riajZ`w+vPxM& zwuVm^P>egmVEkTGa@0ORpZacZUn7oyjMGYSV~y5Y;V^c&q3jITZOYmkSaq}BD;4u< z&@^@0xx;ER?g0TKNA7wZ4Y!ZK#hfKaQ*XYIbd@JOYRhyKuE-niw%SO+9R3-FOYzNo zw`Buj%;@vmSt7+rNmWiZ6Nbs$JfDh*RHCSANnbK9q4ya)VSeu$QSl6RHaH2Ckn`4* zq)xmyJ4!2vY`E4$SAnsjJ>2`ChW&t+*#o$4IDJ05Exe>3k)ZMI73=f#Zbno?`uZ=b z2zt~U(TC@h_o+Lm*0N0@aOLJ8Xf8uAH|`BPx?o}lQZ5&O5lqGp#}D_fKh)RD{%@WV{jSvw6{4Mf2h*wn3f*e1 z<6Pg))MQB@-M_es`S|M(4`&OdXWxum07$XffCo-uadB4|t)SYU+1Gb@l++ z(HJlljc1}NpBnm{ zI&5z%x(3F%d9vbT&3C7!x{f#{5Ri5!w(c{dq|nDoLklT64 zenW9u;4JJKK9+XOSu#ey=LKgX?+59SmB*-#PQ2NqX%`hCiw5rts6o<1qEu=S3;XyQ zuG#=r!T5H*!}3C_oG=`6jxw+@A)NTbm|>>Lj}#S?JoBh` z^{c2ENdlAmVY?X5t_!3}@hJa?FZ%^gixpHw3OLCjyso-$W(0j}+Wu9=^WXWb{_lM9 zqM3UG|3EtzJ;ksqyraLM|8h@O3VN5kw~3c#4-?jn+_K+JmlH1!$lLMVpvjP9I5pJ| zgu$8QXIHRD+cjU&#a3C5uA)Uqg00-bLE<%#U2ZFlJ*B`b*HST0#mk-gZJ$}nL3d%Y z1^bm!!cy?^E#h#;=-Q;;2`YLqzRaeAR{eZmC9Y9gODLR51|fQ%o*{4|(4~W~`~G0_ znSbEkSOO#8f$4-9`iGb8HQaUQ{Bj;&lM(+85pDVXH?Bm0C7MLlREOvBS1_j7Y35eG zJAWpF&rhO|n&`%R$pA_a?3wxWArR~9D|n`3;P(VZ)i1RE>C*q>ZL@<$ zgiF0N7wa9Rn!X})xZd4tj+Eq>k^%(iRjFjTPJkH!isb<|S>a+HurPEHo~Gmkt<>+T zhBkxrLj~1{{|{4d6;?;rbOA;P5F~-%9^BnMxH|`TcXxM};10pv-5r8ka5%UgB)G$L z-tV7z=H}c$ch|P6U8`1AeNh%M-dhpkC_^N6YANee70#YN4;rBOyE zm6~A1ex!-Jh7fH3o~2y#6krMT5Zj`d*B6fiy5WglT0g#!})WcNhi(7?8;IV;+?vl%Sv|;C7u{ z;h){Te|CZV+xg3QtIQ64zSAtx>hw{1le`xsVh~P`4!_X-J?8s0RN?lt(CYE;Y$Njl zJP)QRdfrZ!sIl}E-`n5pT&f<>wFNzK>!Pd}XB2Ery4CwiBSle#4N$&?)#2`}3&ioX z_sJ^Wy2B2$6FRqMis`;IOirB+x1QTyhhjOk=}QOp7x{DLdk6GdM?F4C2@}m*RGnk@ z{!N!9J@36%y+==K*biLZFEZHCM;I+!M|6)@ zJnz1E-J?RcW4&88xI`du!9=kOLxvrM37bCQdnASZzy``Y$m#yl>OyHzm=A$9w&>uKM(6g%uUsXA*d~T&E0f>R`z}1`kYU`O)~-19Lb}Ai#RE8dmWQcj6K`b0pPkh{puag zy|0a%_(n z{WJa9`Zl+K{M^`XHQR#%e(bs`GlV;ZQ$O(MXHN;N{;AjtZo8&Z^^pBPO_|Y|fWJuf zq6kWID37KMghND0HJUHmbM!ou0BWjTD&Z>O!4E>gsT*ve}eI`t<~38j3?DU-wHpuu-tB(QTx= zt48D9cIUiz(@xb+vBhZPheL|Q^ zUc|o9F6JckorYRrlK2mH<8A-2L0+$)<>{R%mjfd!n-sT-SwtObVvX2ZvXb~XBdXGg^;10@ku@tLKfaL`@7`w@+$}dLV0o1pz3x#FUQ+E!+!o!37#BTVKLH1h9!@+kkhNO`#Tor z73DEfHXgimH zgHEe)h$5Y(!ooo7yk1}tagwj=F81ZuZ>E3}p^pV6|6$5dS@;4-3RyC9)fVNS%I75; z1U5UQKbGsB>?i2ko7#|leYdP2(kin91h}y7tiRY!JWQT5rU6N!Gov>hz9_iV6kC&E zt}|`ri5eXbGHIQjZT9R-C^6sky6>C$P4fRcYxO?Cc=M~6brpXttD;R6#k<}e%J8)gCEZmj$P5;L-#$0UeMnw+xR|dkGG34`IK&l#)LM$C2 zp3EGQlSd}$uy$X;s9Ac1UKq*SoGEV>wMF9~h~%t6sm|-$<~NSOxxPSeqC-sbKT%z+ zsXL*!41o4ceW@<4MaRpBIQmHXhh45UUebC=}!m3^G4}cDS`N+$bCD;HF8K zVP>lD4|!g_}xZ?)RA>I76D)KxP_;K<=i-BnVw*57V|5Z^9is6`N78Hnm0JZ zR6Ry1;%9#{V!AyOHwv3AF9cc|S;w`sdiEF31W<*QWh(P^H!bAtF`_*!Q@{UMz)acy z#|Bepn#tGwP?cvVP+X1WQ{jGHzBzZGl@3A)`;@QSKioZ3P;5g?tp0uWyTYd5hK#fK zT?b+wjB%iB?ZuuCOuxtRy|yP1hY1_k?uwD=b~Rq+@;TQh&TcqCSLUigba!&N?pIV3DU!>ZyE3`ULrqoJFuuYOZ=5vdQdZ@u-Bt9(s?cq- zPmIuu(I%>Lmb#P)A3}j~a7QdH@em-Trmr|t%*Q-txZaItgV6UeajdwoCUhLknp 
z>i=l0#f_*kAjn|~&Rxxhy%0UI`|m4?x|13DcY{a5$ysKs(V10m@Z_urDL#d<_ZSpR z9)WRX0l4EET{ww8-mk}wG`>58k1iu6cg(LH#{AuQO@Cjpae%Cc<vdlOFke+SR$x8y$%Ozj?oc>}B-aDdJFZELXMg(iFOYNT&ss&-XNGr+ z4KuA)@qF5*JmhnM50rIil@-g*7~V)7gh3J2r{xm04-fbBfT(**_tNzPXb`&*q~$ED zljKhe)>wpK(5BwV9Now6^>!D>sSngb9)}}gXdLV^M*Au_Ga)-sC$4-=bvtWopy9*8 z;PEv=&$F;j7`gU~rmAgETZB(!BhzFOy4V$z zhN6?83~xYO1L2fnoe|N9Q1%^avhi=BV)aZwH+3%XPhX^pO zXmZg?-Yv}P^cyp=?(LPBBuOKIL~b6fh@nJ8n1+~dBUGD48TH(MM-FELuUoXEx`+~7 z@R;0~=0;o}#IRP_7jDuI=?XsNOKGucA$}4AFQ9?%;d?$5KA{Ds-u08bqJJstYj}rq zhi`ma7Kr+L#^|;^j_*zvy^ah6)+W!7e^g4E4~xQeCz5E4WP|66!0gi@#=H3486cXC zP{EcKq?s**k#knAt0bnxaZWEj^BMo;17POU=bVWtF^MF)F6{G|>Nwe6D7pBU&|syz zRq#qS!F-}!OWqAL|1IT=`%;d2%i?6mItY_A+7(9R@3;d_UV-79X|)@`;qZ|PpAit{ z@|1K)m(cTQXQ1#&hfBQ0tId-8x|h9Z zmU3kBT$GeBBIcs!6L8TKIF0EDaeLgUtSUGP=sj9fPn(MQL(O4t$6gdq6u(kXXL{w) z#pxA5x=KBLf~yrm>v ztji~=yp(TBq*vW5y-gL0@lS~PQ_+T;_15iV>tmW5WSb5cU!C&&s-KpuR2CyK{~V^| zE;!^?`Mg&?c#SZB(TaUSeZB(~OdOZ$HoA(eSUY7uei09EWWlIFPx0BK^9@K%baE0s zuQ~uV>s6*3(UuVvYpV=8;saU2uQR-R^F^v z`p<+udHr=n8{7j$aZb$8tEec5!7y8x@K6PnPO@Vx}80 zvWHszqR~x~Ri`LY9gR^-9KWTR7<~MCZ5!b_?pSCux}%Kp*BhZ0eSZ&!sTR)Q7kqTaUrm zR}gwKq#H55rWoElv~|3ESZHKt#R$uniHKndOqgb^%ul;!vviORLIaf!-!iZ@@SbS^_q zR!=_vpbncc5Ay%Ug*ViMSA&{elD2ZTl$P&k`Rqi1^-*u)I0-H_gg0qPUhc`&O@21yNIw3s=3c@){Qz#BnZK48G`ba*3@1H@{SY>F zg0A?Y(L@~i+()#{m=O0%s5mf6_#<`qmvQ3JfLgqCD*5R_G-sM;7|30Nx?YCswBuv>RXDQlfQ&My9t^RHy71}n5T zuuHm~KMccEk#?B(&uI^v`evLrZ?&JzX7_K53g(Q%Rh?#F)@&%(G{NQ;IiA0IHJcc2XbSJQMGhm_GxU=;tg-8akgAEEKn)YM0z>jiZC3 z2*v{Cmxw6RVUm=h!{Xf@!>z==;9R7wmyA#JrGnAg2c;*s#k3BF3XJR)8r*TS zeY*`^H{yf}Vtq=6T2OxeVjj?wmh3m8%9oK*&v++_F42x>sA2l`ir-KXyXkUUl(n3# zackAve99c|7#Af@Q)BSS@%c&Sp-zNjr;qyK9Rr= z{mP#qcHpzNJHs3?{VTrAWb>&xyP&PC>0-&Gx!z`>e_&vc`NUO@)vnNXyvQKRnoHi< zpn7qtaN_v*lO47v7mh?u23xiv_3JUyZ^yh?sdpAU@j)flXQ+R$U&B|_)t0P{=)F;e z45&Ps_e8z5K1kR@tER1aWAz$QIU@4941JWbysv^7*;JKXtHDJzC(mDHV6Tr&o*c0= zpC*0P4v(kT44T@1MH2$!Pn4+?k7_>0MZo5dTIqu^G#?m;e@|!NoBmj3dBb4+W?G$q z(!M?vv#g}uDL)}l^GNE%HJpl-Ri)Cs_Kn8T2YtyJH_MxQ)`pynbbW^pXf>XfH`Gp# z^fJ0qAN(~t^|$Th3G&}5wZ)N+H7)Hau%7UNh0FeuxSWF(l^II$PKPpzBfrM5?wP~D z))@L$+%kb;yr&|ll0}phH1We3M5=x6wS41!RGN>8=pjq%y_V0}$vD?ve`iD3HMX31 zrWM`54JE!(DQ?NiMaX~|RkcpJk8N+a-5_(w>Yf~`w^Bw$QD!bX^@FIA^@DNL&Yu97 z+9VAB!~$b3>C?8GGf1uQrp?~Z&TD%US!@{z0ltM}Dg!0n=)c$YpEnD2`w7khBO!Fl z-ZihxF7O%UD*vVUK4OJS71H_@t0n5@y2Hu2;~TY4%sj1( z1Egf!T9Az8UJc~5isf7C>S|8$H=y6%Zlfvds$*uStSnXC*L-%&-#U*iVx;8qV^HKC zo%rVU(1p748P}XQmiq-WOQ&qZ@i~nvg}Uz+>OLSq6I-to&Q-eV#(K6w76E-oF*;?u z(s%$4nX5rLb?K!^ zq+a84X$8eDzpL}*iXqy`J7bZ}e}`x@b78DHB2E|fnN`LFXCzDlsj;6Q5$ATo77{f z)nzxi|8$%0%2D?~9ZT9i^(B$v;vv?XjLUd1n8s_@CMfUdu#~HIHzp+-&ReIi=p*Hr)+F=LzcfQzHDvR|JH#H(Q%R9j^<+% z4sU;_dUDyzkxy&*QHI}>X1S6_Z7B53>$hoqg09u}P^gan zei9>3yTn1A)?bsz%?7i-^Z(z3Qw4D(e2*+m9PL=43KQ697!7O>1?}H7j)k=5yrLX% zNAS`4H>IpgS;-ItaKAS3aB~7TB|u;L1_m5e$e*V2CM{jtDDQFm@U zMKOPN$iaZiX1>D?^kmxYDo|1Fp3HnETK3>QcyU+n^n@xy3nb76{|aeUU2+}Iju@CZ z-uy#vmrkb1Q{@Phf$(|%eAyT6W9XoJ*8ci}xaO>*n4dZRPr)K*l^x% zbMN^z&SlUW)r%9xoHgO&5>-U2?X|plqpG8GjE zo7>D&*-l*~*Sx1~lbU|2k`if9P|+FWS9unkBOXD}4-BQEOGx2UF59o6@1ooZtzI+|4o0T{S!j1w$4Z&k@5GNJ_&p66;Wo z?8}EAroVOueCOLk1rFXsmc3i)lP$n2%A<8$k9*~%yxmI!#g^%v#Ytid z)|cr>4>-K}E^tjWRHYG^xZMNJ-pb=gY45HtXMa!ySAo%wfMY0Mb4hv)O)QR+b(W6k zNBV{d*lLCYQ-H>}IJjKWW_YV?r8ukK$Imb{;Bm4$LtZp{5u;k8SvI2nY?tpl3rq?4 z?I}>+A)b7gJ6(ytqnN*$j#_{FYV1Q9DrdPeucGq+w$ruv{6mGx!%8@&s~2zEXx>jm zxmz&}6EU_bO1-|{KQ}ax1yEB!b{m=-GjESDoaLJIZy3Tz;OjBYrB1p@tRiQW4%(^M zLg4VmumzD>sC|F@WOteMC}j6#&Q=23 zsg!yY9EUR>9>$g3saIN0v-{l$W!S9J8V5z1OL$C#t|1J@`Ep0L?HBZ``7;g2nP)iev_aj)cJQL? 
zK%>827~G1^ocIQ|>^Mvbft_ys@2brF=B5 zSUY7vZ(Jp;?wjg*&_b9m!HQzTfI z!WWbHhXZ5BSS8uliSaXTj;rZJra;CjFBGQ~KJjV9h9NaQne7Gf09j+{UGi~EJ=XKi zB7U*;o;{UQfCxX@)p?*gz7&D~7rHb4U4tPf$}&^Bw(gxUKi{s8t6c}HLcvCu)R>%* z?-C;;(GdcDEQA)v_vv~wpYcKuHsln1Y~-p3B_$cg0Qt>0(z+>HEL0#(C5e~C@=%3l zxLshH^ zegzyKW{_JDy-Xj&;WkzWUEwULD8RGr?!q+uw)Bke=TsgSII-C)rE*-2>z6zN2 z_GE;Q^9;I=g^Q4pm+PDY9o~MS?su|usWAe!6~($q!NnY%nnlAeS-kSGx@McpwiTGy zTtitYk_Jsf_*!x_;m{F?pDTCs58WlEy9%{71K2& zhI~QUPW#QR%zFOhX=tYZkD6v+QEKF41hulvF|F~$FEiA1%cV;mbTvGCVrp-RY8G~>PFI_@*z;~g3aI26!Ij;cs|*S57_7~|1G?7%v8#j znv85-_it9{i!?n$8rc8FLy>DEuiAv*kF2anTDbO6I9NS*fmeP}*Aorsl@2bkd_PRCH}^6U%4oRy&p(I>&YI-SYcF!9qjX**x5sY{3kWL}Lvnspji3mZsr*qlXM9aqV z%SdSvpOzA?JG?OiVo8(eXeQ7!_lsvDDy;C_iiYz1#tU_4t*$$a5 zgDES$pZnsQ15>6y=)RoF_hQ<)Be(+L?N`*6iH#xwB+T;A28#MxjHu&XN1Yh!T}j}a zzbZ>gZll`}pw3w(LxUT7b0!20KQ|qt$vp0{mwpqhfaY?0Fe^&DoclpQ5|sKu`n}uE zR;@G^GtK6AO=ABOB&#pj9-5!Da$32IAvYe?#`gzB(Zv+}7|1Eo!>hUuco=?_EXBRn z6SA3|a#!7x!zPVC&4LOr{dLII9?Fp;wZAFp^ehR)SkF)BTW(sb)1mUJ%L*#oC>Guo zp}-+tO;;BeG49oM2@E!ztKnPvG%`LzxN4&<=k#gR9U_MFSyVzXinx`E5q^97SKit{r75=;R7U@7PX=31 z9{HFAMVZb_fv!&i73H3BuG`PVOPNiXcWmPk9-%^&Rw&P%m2K@`xXD|&*OTgF@rp`@ zqQ2thk)E=hpbe@;I%?g;yHGRM`jduEf{OuL$miyS8#&CSH3vURtHu9DK{`kpbyQ3~ zsR2Cz&|7;J>$E65>N24;{|#~zHaNuna1lI38XTJ8Rc#TOlqS_=xBQX*iJvoSW|S?r z6v#fqceYSd@ToEsuP?~?XDcrMmeX{Trd^YKd?2Gv znV2H1ZV8wH@=i;kq6nK={uUzU9*#JCvzn(~xt5}Rqx3SUWFxj8zjvG*{boNxqICho z<#amue*jCIdb8(A595L9Os*w;6WXpPZLg;$dPRm?L-d@zaS9wd+LD^{@5#$`b&?{L zwiBSStfKGR`Bx$&>CA_+4N3WujG0bkv-$Fl3aG$JpN(t>VH<6pwoOauSYOOefe~9r zgL-vsVlx00VJk-i9DAn=$|}ynuIqBZw(lzXey+`9$jIrEHXX~zMr&&e7t7$lBW~AF zQ`0*r7mELSxBD|r=FE|?_8 zcbmcxrtzQ&E!URlYr`=}@#mO(=PXZ5aNCyhmV}h$(&Fk@g;^V_wV^jKQCFa8O*hFj&5#JrI>2H{DM3sz!y0JSj-9FQploYUtu<3 z!~Gj$r_zlRfqTYSf<<6vzo8aJekjboL{@2t#MJ6CC&}Zk3HTEIC|%l zel3}BGp3>)Tv1hF+WyXN(Du=ga>Dq!1itkp9lph9XzXx8g|Ce?j_o3#+*8AA^2{+F z@T_)N`Df{Lm>gT;4qh_Ps}Pqz6*e%cVa-T{B(VwS`;)^^uRL!UJB3s3ZD)(lqTr2rBTk$-Zu_h+{0kc$uxUvC>eiC16U;f{tV1vgcI9m4*+CuY&Sjl7;=oDlVUG_Ir|5`KWoMUUJ?J6r4xN()W^0#-2a~CZs9=$rhX7-5! zr!q|RMNtA2RI__^UUGlwa6N<>yAyqrTKd#rU}hqdg$;7>?`< zTt%Dn6hEKdX}tS2@6G4AkmALt+Sc6VZ&bc9$Dcx79Uj(A-ErZK`%4A63lck8O$bu> z>~a=z%>*fU>psjgIGW?vQewS3-1&CA+{L2OJswOR7F({%TDHpEV0Vh{09U@0XnZh8 zP_~v`ymR8P(Ft^$eL(J99zPBIP;SH7xK$`pzBzzyqV=apBH!vmA!QRRut@>e1gSt) z?lE{r9M3I$_+rRoyahgW?pyBqAr}4taDk=ja-dOYxkClM%fa;YrC?pMsmjGG=;tk2 zcs_l(e^FGRxui4oALtzafu*1M9>(ShzjI1qzxwck#0?gi^P4z%6Y$?AVXA|i+7dmOMGFV;5JnZ4l4c60E^u=R%z9jjK4)bmuJ z{8H(562wq^DWl*R%zlCV77usCg9lAbKUO|1XU?}At5D>e$C}lPz(N;pBL~cw39BUD z^GcnFo0VO~Yaz9HcIRj#?XH@KUWkwQnA7m=WK8*6^|&PLL(t7F5DR-vIQaR08V}`l;L;tXshn$o9Adqs z(M%!=@4AG8ZGjal)dfK5fkM~bpPAraKAfgcpO}@wc#+Fdg>7YE`%2FcjqQhCI!4iq zkoo2)P!O&Ch;=C)HNv5mCg@WZS?BiSF>t@XPUjiRN5z*&M8mjT7;k4x64u#J5E_e; zt!u8_e}2s#t2raROVYJ1`c=>&vZE3N9XT&57=fL=uJ)eGiJKSDaAbx6ZX9&#d1!%vSSh z>~zeqXhm7r*rt*ovVmJ`rf|J`LV#ntd}MVarp@mAPh(L{b-Gf@kGy=bY~M- zqYv(QhG)1-o1BPSasi}Fh1Iqtwh~nC(7Ohkv@!wxpu!n7Sfxop$f6OG((dNK zakB38IrAb;oDFGEX^-(th-$fB>HRh7uDX9c4!yyUmCf)uZT;meyHrZ4T(T>@17IO7 zw|h`lYoqsnEKgx@c>YsUslqyN;mz+j{9}r2-^__fg(B0#kP(e&J6;>RsL=!AF^mdohfZ6PC8Y z2|v8jJGRP4o>pVQ`sc@mJiZBWZuxqbt)f-B$nNu0k;~F;wQyOJjvxIO+9ObNiR)!> z+hg!ZNLjTZ@YAX9+z@>#GU>Uch<~YZl}&mm(NQ-IdRyb zefea%{?6}eE4%>7^L4Y;S%xY)-7e6gcV$s67W4aZ2Uo+?&}Micd04F`;cH@> zMyRM(Mu;ja7BU`<)ZV1eW(|8^3~zkd2M?Y*j%0Nu!05JHf539UwRnDDVf|yd2h7+A z&eYD;i%QoRQ)^6;vINx^Z&XH!q;;F&Y~NI7Pwg^iiU<>7*xco+#{Ht&_Q!zEYV=1? 
z%^eIA4Ebe1Y>_3UaiF&fAPYUM#$W~M+g`o5N*CXD4OWc=MR`bpk|szIt+%8=(s0ch z@XvlJ2r?bBn<#mhBi1-e|S_Llj$f* zQdx0P@mR@0DUx^U5X9gz&M4Kz{9x>K0&yW(E8VFQ?x`6r1QBN9pzp*2 zdQ_AVB|hB?VV=Yt?1)#Q&gOiOSa;ht8&|Kd$y5m&eC2J3XEQ6dsEGf*)ViAq?s%A` z+7(eH7QS*9cF(UReGBnQU!Y4nXkIKUl{woQ4;S*pOf?~91i$?z6xns`f9O=}FlP-E zU9WJafWt1<$6Fb=e04gPn3jrE=?$kvyj3sNAD9US4mv6!l%elju=9pbZ%^chYL(zT z7$RlTw;v|k0&^K_{b`**wZ8yQxoY|o}BDLKZs+q(5y4)TvlFq3$`(Gb`S zS0#ySw1pAk8b-pf{KJ)4Qe4O)4J&ZY!aMk>wZS?C>v&ZRs@LqN%A@mc3PG!}8GCik zCj%R-%Bt-d>6)anxA(C?`((i|3_me{v4F2q3K+UOR&3o|Nm&s7y$Y!++rN)Lh9w$E z{Kp62NiHH-ZoO~tBMC5boiX8HXp8jkbeY4%_}=`0{%PdG0?(PrsE(IFp^m-{Hm($ExrNH z_|L|2^V&bWRoO)9iGntRS0@kRVhw1mO>peAO7XhvQwge7(ww zsMF3FeBK$MRd^+3%CyoHEaO7m$-c|GYS200=rUr>8jTLmHp;%ph}j;WX5!Cx>tWR0 zU}=Ck^A?Hveuup@UQvgv5O;GAqV$e%0I$bPFCcNY_$m*8A1L?qDEBrg4MQHdWM+i% zA5M{OeIMxdN8u`k%x~PzM4hf(ABf2z-?PfTFc);+6{(6`A^!)LXaj3(&D z`ooJ#D^#4gaQ9)i6{mKgM;RGAVEx6FG^S`0SyA$)+MRjZHZ!{S*TD=9^u;K7wWEKE zsG6^4ocC!JCG|KlCv88(S>qJ@PkhmdXoGUbgZm&gOo4!H~ES_OkLNkO?%PdE?=r7Z}vfXMmo?v8~<>~u&*i*Q6Gls_9 zb^3vk@Lc?RJAx=*;1Dg?4X#j5&bsFW(ZYyWq`Q3>P;_+!+ihONZOTo=gVTr3MAu%{ zC996~^L6iTY0>iXkj)D9DZ?+MTU%gqfJPgfsvA`zuyFji>y2rz+Qf74Qy5!qIJKH$WznqFNN4VD(t|^v z71#4}XO0N2L?cssfwvfqP-MkEXLbY2KHK~@qD!EG-OiTft~^vRY(uABprd|ttlIK|7I2+lQ+qPSOHE;GCsqJS32X#GRK<{MeF zVM2NAK%l~OdIeBJ%n$DD^VpfF`gi~tM7hptBlY>sUpw*(1|NM z+wMI#U$zJ+*r7#*f(Q*DjeNe-a%M9f<2`xK!JaxSsA&1wYy!snn~J3?PWj+76DbYc z6vifxu<{O3XGAj!+_DK=zu+Wy z>oY9jv;(@}`d;UYdTUh~NGLE4Z6z2Z8Go4ru_*fptM=;2Ud7=E5CAko2Z zisER?8$G;`ns)y@OwxG4@86YIktjpGZzpTkjZTH~TGoO$A2gd{4?ZD4jZdMZ}e9s3?D~%EebQ~h} zzg~moTYihnn1KJpAt1M_-^_m8HNyMDlC9@*2t-pSNxR%Ev36iEm!2)ne@J;N0$7N~ zfP_3@7^IE{o?{iQXDhd3}Dfu*<V?jGn$ESQ zy!uUy8NDVMRnD4gx8zGVhOUV{o38e#eK$kK6FS&m9NJvP;pbxu*j z@QXp@^Pua)2&+pfBo?9Txu?9{&)3vB5z-R+M_D8%6}5}U<2o;&!>4M(jYfz%#Ta{T6*z`iu1*q z$fcB@pLePvmvfTFy1_XT&Kd-=?RqSy?QG{9$!%kM!^%hj@zNWj_PkLudkb=miAbtzTphspF&8A> zJ$A~2%7~Kt$uA%7hJ>tmG-U6#*k`WB9T|rPLY_HodAx9n)+#)P6PViV0rU{|@fEFz z$>(81QF_?L@ummCru~E_$P6zyv4%vN8YDBzfJhCJ-LG03^$!xrw4ZTKA3{bv(Y%L! zYlGy8YCEnA)pMq0=$^Y3k2}3yZ8yjV7nxCNlqv3^=3{?*pG4DW@E?ehhGoa%;QMlV zm#CT^E+xME>f0eTd{!Jx!}wWbTN$H#UT~o+3sUOPKx1 z21e3PDj-H2xoI~gjq}Qteu2Ad=jNzkwK0sKi((Krr`H@KCfBjr=kR)kK0gT{1$sKs zi?R8LL@?MZYygz%7Y2iERnH4 z9lw#NVj6NIjl&G(H+w0W?DK-0v6w6`fN+t^{bx_~fN8tQ!5Cxr`X2&Pj^`c7my3VYB@of%! 
zrntfMdLquqU9h47k-4hmrTI%opN(+7(t}cB1 zHa8MTG6>6LK&v%dP268~B<&!mdhO9DM78ptn5vl9%A7|)pRUyq)Qg9=WXH**oyIT4 zp!gYE3Y@!FF;Y;3AFRurE&=79f4Xh;$6IKE_`F6WLzd}IA?&YOx{xcQG6uskmin7% ze3y#XFMb{IlF}rZ2R+{cG`?V*?uTB7b1Av%Z6a+1Dry>(Ey zVQ=;JuI(1D1UObPXT@KQfU>}iUxbheq1y2;JCO1SVoraHy?^6=LsgJb6r_>(T&uNC z7hE2CJHv$=oCh*n>Suo;P&()m$+#vLmyo*i^(WE`LfTC)`@fewoe#->%7gWtO)y-( zSrF2RrACbtx13=J6qr)lnL79y`8EP~Q$kd(Y#Nz6v|%dKVP%;ywTY!FP$ zZxhTl^XdCjTqY5uAP3TJI^!O))I33G-{S+E1klOj8GMy2U(en_RuSOk(wYUyB^_^r zC{QC*`dVMw927JNJ=v@0fP0z9s%$4`_F@B;U&NsxDF`lDa3&(v>EU|?w@Q41vphsz z0};>jT7NE1@r|4+4?jtn5*1E_TCjNT;$%SoiAZDJWSPURk;igBS6 zR6{p=BK>WXF zNC4H}9gWkszgKP0yxj0bEY}PTz!!4mDe@9+@McrpL9SgsPn1?F^V9|MP?7&F2vG#q zbUm+cYZ1Bb)El)*xd_Cr4_oQBnuL770xyIp>a&C*Y0!N>k{}%6tdKoFAcC1ho_7DE z>(uTML~bXeIlITA6!5ugNLh8{ysV-Y^eOsMehE;$+j(uU z0T1DPv3{l}hH;()fTUEzzyExD*6H3cF=qc^zp;<*KamXJ7(Q)A50f+R6^&sd7Xe>4 zGf-mjbo$S2&wfEss^X+CU<|>@`o^q&KVbg+Q2C~J;*D*sxN43Uzdx`{eYENdi+QaP znrCKy?W07BkyrYWC>ZIZKdYZ(Tr0fYH%zL+ahq8gAwML82*8K0%qFn0|8YO?jqd;b zbsL=h7L*?nJhvgxGLW}jBG>YNf1Le)_GHPbd%Z0dOgz(qp0z(|^X-4E4jcl|P3Bl` zp-_{=IUPpsNxIlnaA8BiFKdqJ5g{^PIj2V)Qsv{AA2G!rlpZyJIg^8+vuYG@JqRX-o_OcuY~o$GHM%J)+`4l zCSW#Jnw8l*6_g%`s zxfRPe72h~<_?EeID6SK6dL!&4BjWTYvRfkSQH49lMD3OzuB(CU7g{Y5>Mb{dKbL=A z;8u8qQ_+l5v2I=GV@=&h0at|dq{Q@og$j2slUnv=b(~D%!g$k-EmPaJ?s{sk=+ZT1 z)0SuF`5DqZG~Zv6y z!*_~fWl>ikZ)b+Zd2g?s zUS64Nf0ilp6+7xax}m)E&hn$1vn9A>rUc1N3kqZQZZwQyQCj+kP2q>OpCRkdTh93Zsuf4?3m%!`-~TM6{xY>GJ9Wq%usqN&WgD;Ga)a*))Bnjq zSwiiLrWsr@SQ@`o<8P63_TN>Via91aO2Eu}Bunv#e%!ue3?7>4UYfg^x*Ia1js+E; z^EnVD+@DsM(ea_kC)Ali(M9Nkput|h);>oXootYeE~{BpoL=u~?2r=NL+ zI*fX3ml|arcxpbqEp?<&@q_LL;p6{TGxU3E?$3B=agOQU;g8=JF;DZ>oS%16LS!B! zz^)!yw6aa}?<#Tkh!^3$I*CgY*Xn_)MPMH*DZ_`m*NgB?(R-Qkpjitf)samcc)lzDeh35;7};;h2n*V;O_dR&-1?D zy?<{;GICDN8GG-w_C9mXHTO!4sMT-|J(9N+VBaC7jmy|;FEcNXE~{69BvIJsJJ@?+fpgoAqz_vzyYP4C>J zJg-a*t%u;vR385Ms3Pocme}BL)w+G?p%qVWaAnvq;(|Ln?euiKJeFFDQTOzT2npZV zutW!=>#A1G`8jFuh|kYWohA{aWKAgXj4GAo-ui6wruv-0M=GnST3W%1SDJ>r^%`6rfwL6(fE13?soGYl%5;$aM_I)X?g}X%(=7-f^NzW+ z|6U&R$_&yv90wZ|8X#H{of;dR+BE_Ph)%uYO#xO$ZB#*3dmrwth}x)(>e73;>`gtS zGF1?Pv+Y6sG2?RW7!bVyh(_zv-QtG+JYa?1pJee;hijvr3sMhJQ-A!5V-|-)(_I#* z$SjBmc9x%C+!O#r%K)P7sfTc>hr*~vzD3}?L!-V3$MFltN%BeT%nk50L~W#GA_DB$ zS~~^fm^I;iQ$_vC9Ge3>rXdS9&d*UsfUD)kFTxX|v?+AbvoX8iIBbFGjceRIJ{x*mR(2NJ@`eW}@kMqsRVZ#cG3IvEA1w^$Q7`^2F*h+uA`ty%x6u zKEOew>YZN=)ZboP?2dyC@<5)pyYdJt&EAJGpgtf(>QaxAY-(68ZIWJ5)=xo;Si|(MZa0 zw_DDBlF)0Y$h0J);mMOQ+8*@0Bkv0O8sZ!NQe;_0hGHJ_TYaoneaw|Y<8KfS!2L}m z0=OhCt~5=1>+}2KSWh-7iWz(y^)YevG2XzNKY?4P>SFg1$U+L8^LoXUXX<)Mzd?z}!dTAO1 zGo+qsx_m3S;Il%9jDp_YPRlUDd`Vgq>=H%6^YMV6?~5L@MP-^Rc>`9|79qOKBpV@s z;14R*_u!y*YveJ@G0xJ7boKncRa(#%$@}g-_u<<< z9+UVHVZ~f!Cd3Rl3-vK!bp>*+cO$i92g?V!NaMtSc4M$e64<-{H&oq(#933}J|u09 zrF0FQZQ!dqJa$wbZFscTgR31@fcgR=ONA{(Gkju4pv#NPH2jK4Qr?*m1AM`!SY@Ay zHob$jNYDysjsR9j`=tE8+S!G}7C8e{{XK;9GlKIf`oX^_fv*z9JB{%*S zuXi()%ZFV*1SjsFMuZ#%{TafA*u%Z`P>afWj0?Iit@7j^w(bYtL#-2e54eFk>wGM5s1*K%}UdVm?2H-V{XBUF7fCi_ulOF z(^S@Pb%n%WZwsqrcZ(oix`ecq$CAgP3+Uve|Q(MQ0H&l`Z$a4`m+2|uG zNXy4kl~X$$ES(NUNNuDgX{m4}X*nfnw9f4g%nf~V+yNo+QhxAVd_IkrF1m1sr&f!Il$g_IZdkkM4& zK_2!geRwoZB3U?^x&j7vluxIaj1chx(*ZG|CcjwEFn9Nn?~{hYJ>?l+;^V%R^90E{ z_WM#kgf@82olFG@l;wC@nRE74RJA~J_ zay+GyY+gOP@A8Dv23=|{ghOf1F9Q1LgQ3oAJz@y{7;H7$-EfJ&8^~6`@XPe!T=4TD zt9d?nwYd(&2!76VBSG6c^p-pAT6bYh&pcg7wW>^j;4~ic%&64C zcGg5yi{AOQmp65IvrU3Y<@sP(`%*0zPKzWrtLdz4Q}AY3fq5w$EZI*Jo*7PyQ7mBXB@yasd@oeZ@lBoB!b}vxRC} z206$C`P?}Ecn>J_jLCaIuPbYtzZvkr4Py1o)SG|W>&R){E z$nN_6;vkQUaiDD(j4&;bc0~LTyPERqx+z`BP%2X83s^Xigs~piu}Pv0 z&JugP*Ve!7&Hs?fSTw)?Ki750lvFL`RBJvDm&FadCj30akNKr9TfCE_xKd 
zdr*jLCP3|7xLH{h1qoXz9~$*lp9+jFs*k}5UJ=4t3T_I*>PkhIq5ST1>SFw0D@a*K zF}bLL90?XJqIrXDclUw-;fnZHht{c_?r!4xQspH`73^`sj8TeqZhaguM1tm2bE^_;CFwOB$?_Ef4G4od#3iALSrM^wnlrKr$BO>q26 zP#`B^^ffSQ@l;X{oWX3!_n_uT#ux0?DR8n%jxOUMoSrw0QQ%Y}R7Z>W&bxb*p-2!Z ziaCgx;146+hxD{LWW)F2@^3OwseKihy?pa&!!zNO;Jl%Yvq~f`dbL2OzC0CEHm2Gu ze_H?_xuj3euVcqGm*Bc%gL&a@Lb$fK9l4x6n}sDM)}Co;Sw2A!fnNo)TqX;W(TYmg zUZ8`tY>Hh-y;V8apX`&CKz0ac#J7FCxy2y$F@NeI{Vik@s|fQ`gj0kMr_WYwOpvvh zdL7@uCD9=4GCgIB-Q*3H#b-aTcBTDt4`+n{VeD}Dx}y~&W6=L>5O=EMJg}~((W@P- zS+5D<;tWFSxGlH<0cPFNM9LNZL1?EmDlV2G;t^Ksy7PMrh^|USiLwl@>E=1xa93xF zRv8Co{u7G>SRg65s!~YlUa<&OvM96C7R&$d#HSfJjFPl*z>H(}e4Q2xBTxLvrivwr zP3o^^CqE_vza%SO^LtNp^A)$n1)IGq+&?Gk0rPbKouz`pFcxNrXQ3171D+?x!FmGX zh_c(?FUSt_>UZ`(k|qM~Iscqw0Mr->h(J6J`$UP^`1_JK!r#m=VXf#|vtVnys51?y zgjH5-;LPkdPx#x?EWMr=EFXEEbVJCdj>}TS2tQ*m)4ORq?Zg>H7z(-afcP)A^UzVo zfao%JHfbNUYyr*FB~uIGdxfi1zK{sa?u&uKRW&9u8sL*_uum@q8oieA;m*n zLhVC3%B=){UUP`o>|+`8pA&^Jv#E>gzZSV$#TJrLU{>_E=t`wx&$A-=kGp)`BhOJ1 zOGz@tYQX*7*b9(TS}(zM3{L~fj#Edr)5fz^J9xH`Yd1%?)e+xmGkfg2N6vzEJ-kWN z@B1t8@!IVM!#9-Ard#VOc%+2>3A8dhviH?IW5C#lGBp&f;VyuzvQJ{}(?D{w*N=$q zPs-N$1|FI}av_JLzqF*2n&l%s7At0rGxb^^TwY4RLC--8LqUj)eueE`0uiLXQ|?|- zZIkby&~|D&{coPgPq{elNHFVXyd@?l!(Lsu;p;f9;%=_<@w%P0`xW$z{7;ZHqr+{CQAKhuE)sxpofaa#S(_bun7;LmhXm1^v-D3<-iQBj27R@f9$|K zq}iM0i8VD+xU-&Bpz1hnb@rm`@5bm_oztHVk^Jv4JLRjm>)1Ic|H zSz;0IJ?xenoXN>%W*5BN5L{h$Mt)d-5L(wdpJIr(&{>~Pd$QmmoV|Y`P0yEndRgGQ zl2xOGt!~$Z3!-=hI1|HHZ&`NL;%?PTh+RlM&4m(XUb8*E*IRH+fBX#@qPjxHb7#`q zKna;s)1bdqS!WXVx?i)&R8SsbO zvtF5M!0ftZgaT%*_~vt9T4Q~^Woad=px71JzLA#HB7xeAZ9A-;&{DN2&D!<$gn6s6 zEbvY|%+&bR&}=}#ZB-S360=TiBUH^Ln3mgp0fB#|I%ZvN>TCtH8gsBv@ZqiUTQ4sH zkN69n!H@TfxaedSs@D+FS8OlhsuF4j;a|ippD`Wk{t{-K9c(A$&Xl>0KGzo}X@1(9 zgoo5e#$14C2;NY_!pK~RuoP<_b{A`@ee2+HWaeL8;DP}6LTaaeDTs`CpO;yjzwQN* zG;VPiDtTnOpJ@27A(B3BP_3`==cDiu+ZeF_zPe6(pA^$YB6SWcvR%KevVt7Itzm&g zm9Fkc2pEDl2*;@|O#@5Mcou)_U=;qvz}-ya(V|dKo%T6d{k!8M_r}A`d!ss%QU14v zWwxLBG1CTBU(h3Z4{X!(S#oB)QAN~@6ntbAIu`HUa2rsp4dODAd6qX8*b`s0)<>mM zDWp4Qt&AMWss!k;@_{$Ge@_Ipc}}gxmc)b-rUFo^f~!Js$fEfDgFakiSAS0NtXM02 zDz#<_cfT&c`9?cKyu7uu{DykSl6q*@tGD9GVo`sb3ABRD%D`M?XgjMv?oUHk};7hEWw3dGjC<%;w&@`y z)C^|l^uBk;#wf{Kd1LYq4(SN0U*DWbnk1#U>Yo|e=DtaR8RN?o(o9(KMtrqZ_Zai| z%g$q13jLAZr4t}`U(L8HDb){aOJFo8vVF_D;X|3&t!KfAeuIapfZP@K8WB@ zE~^%B~I{SV>pzzHYNfZ9Ec>MMexh8K%IqdpVrq0_068Jy z)(3~Vl6#hfs{9|2V*bMFzY(ApwepfG9k1Q?0G4LS;Itt0J<=b;mxB%P1<)}E2hCVcy zFaE8whyfrhRNfCOP0**?srbxgx~6+}zcRIc$E%03l^Ggm^2j0uQ5ULdYdRghF?jo< z>7cRK;hwfWb}`M!LFLh%Fn48uR)?;=KIcX{1lWPLi-+&f67j`RPv!wai|CVsg}qe! 
zNy8J~B_PU#d5pSeM2aR=e~zWwEsUT7pho;=r(xv|!`4&R=*|una^1Off3D5(-Kg=A zZ2uJXjn;1S)45);>Cn1i+ZLaVLcXY6W>Aa;Yc7%P+?XCPo; z-^Phq@8gQ9kL#{jKAB{ED#8uLSst4v6O*o;H(Yry5Y-6Q+U5c=OJsmvqJ)MSSF?FhRu1 z+p|1Wmdk`3|3vN*GitgY1(?;WunWT4PaEq`{k`V$fyRpsGoe=vg2#i|f)IUobb+T&_hX)8=;dJ^#b7UJi8n!_z2g7swv|9y#*l~9qbEdHZE8W0#>dP9 zEvB1ed*3hWeqFgQedVx`Azc!FE#Ew=>jiSEtKr9US0t$UM>*mfb_HutfM}z*n*gy1 zE9Y(*m2Zz2S%SdxK(J*c!nsZW3UPvXw4Gx`VHGK_)xB?>LKOdFoZeXw_a1+!-=i$o z%{b88eX&OTw<=NVVuNGFheWg&Rk>C6*y3+P!jg~JuW80?gNFk0i)^fA8w zO)AFUbAcJ;(A!%_+IY;%h?}*t@$BDm?%n6G2uoA2VblDkqTTfeu*cs4T~J&k^&M}* z(;dIAgXh#=qInVFj2rFO3tT`tqbUb#>GkpJ?wT@tcYghThKWbpt-o43)C&E!ro3io z!t1c@Z2f`Vcw~?nF1@MHx^7|Ev~D$qBdYfPpr+S-#kH4$U`&jYnGZfFs2byA746QD zDa)?^TPWngf})9c=-q@fSJknqTQSAmZjsMs6ekX$GJyNa4Dn*~YR#9xSL&6_kLq>LFk!DMSK`*|N+;*5iCgxIHWNZW z$gd>LW1DlSqhF||Y70O+0PZX0cqW|in8bo9a;{bS9e8^`r#A_?`s105?#8|z_(S=8 zTU4sG{CXNzj|sA38}(Y!sO~9?X4Si(a(#`o;N%;Hr8m3}FiELp5absuat=CNGojGd za{j|wz(QbMY$o*lXYgc`z045qn|imKOnNUFvhA@q?XVjwl5>rr9xUN`OtR{ba~S{S zKvQ1Wu6?Z8HRZ;qYXpXcm_e$%(K9)##k-Xv*_GqmmBxJha$aUOZu7R)h71A|ZipG| z%{s-{ZBC45&|I|?$V?DBcIP@xyy^61)QcQ&!a40`XY04ws3fxfB#52+^TMaAZghI; zy`*Ru3_;2!yuYXPrl`*g-M)<3yp268hV3|TYtJzqnh3f>1t0riD4Wmk-o=rkrx%4H7-27gJ+`gMTc>Ql3|&P)tX9z>{wh} z?vFzZCIVFT+`awu!J!nip3qt;@OmU%>vV3o08811TqujxhuO7?Ju0wKN}kS;Fgq>F z@eXl(Z(mT@Tx^?oXpnYxT>3=t;5IWWyS~bo5n!{A6!2C z)(!eTSH!o^=Xa|2B(Sv-$j?BbrwFf7DjL(O8YPRE~HKBbEMlt#E9c|7L z`u9|_!D*3kk5kg1Yu?Sr)!|L?qU|~>B5ax8qVgo~+iAhot6fl!0kJ|`3~t?cisP4- zh*BtyU3JAgadUeO9hUxbEwL?{)6&=!yl-zQPi$1g*2yw}oZhY%%%P0|KZhm1(NZ6K z7mhxtCj0>DpQ);0I@U^m{1lSu|Sm7Truk|0~&m?OyGiC^(2i<+EGTd&HtT z^eyJ!icf;v{}&~DkZQX=K^27$DGHusl@mo{D- zrRh^n@<^QI-+K%1g_Izg{OT;b#3YxW<6oDe2nhc+?cI29)%eGbPx34KMSas8SgC#d z?Ol@wOj^JhFEoa?)?l{FnN9!83w^BJ3-2DYIh8_(YKmvdN!+G%u@r#1tt%hwLULR7%HCB)~B~eV)tpE{E5a@Sxg6iXVrV|Z6cVbQ_ zDari067&nZZ^Bl3fnrv-hP0j@L^EQ=tm!w4u>Ir2r)B%ZXABs~a$XZ@{%@{_sRyhl zXDcFukHDL@=+`L)aqWdH$ww13CoG-y@6u#Y;#u=qRj0N&W$7O;c)gFfv^@aTi6wE~ zClGQGT;ZW#pZbAHyN zI(wkHEw6@lONjz9@Js#`H0Q?B(t#9*-Ct}A#Bi2LfYJ$V}tTR6E zZKcaWruP;$Er$88XFU$jBkZu0^tI!)h~tzGb%`k6p*hT@JliU+9B0NVwx4zgK zghl2J*7qizAZWDr5}V$81@c`%}We%_B~1cV%==K#1h2M z0nk!6rE(@rW(gNUdy-bT?^SK4RQ_K}lwgx!h0a#9X7IE5kT&bdgL&!omIQzrd+S)( zzIL><$Co_dv)J;d^V&P0;Fl7!^Bd2)LlmX#A5#;qBAddKhn+`j2=ygbDRVqZKj=wP z=Gz}ah}Wle2%?}oW-5FSKUpw#7m0gh6Mcqj6buxY$gn>z0%sa1EgL@(IgOkDnvp*- zRv&9(ZMrinkNL$Oh?djqTSCu!KKF$)C}hx7c%+FHLP!X)eB{^on>0wXtu9Y`gX$pa zdFg9>U+schwHRa$m{pdhA)qZ(z3z8Q+wWkzQ&PsI^+Q$vhU9?hEL5JmK!P=jj1{N- zJn$rHOO^LvAXMKe!SUjV$UK`;lxMH&aHCM5}N$SEwwq`)n+~>ir?6vf{%%1_f)n z#>1x6c>k@hQPACUrRk&>I==^m(^>*sqwTfo{r^AEG1g@-wp8-+o*_!8Oo{=^C~GR6 zOn)lkwUUwAKm*lR>&Xbr{hIpLX~IhavGRlGV{?oEQjpieJol$VE$8O4=`qK)%b_Fw zFS1@mn|ZGcEDXO6dx8Qzk>~XEbZPnMN=gtmilwLBO1d9HeXc|5yCPtr*Z|mGI~N!h z^7A7^j0vHXT6AI_hzCmz5`XxyJ>3pl%W^Q8dc$(W)axr1p7USd__cP;AI6$oh1pS9)(sF5{{)J!q^m zUB>8PbQHHGDz3<#A7z`Z{IF~1Gg93A4euCZ^QTd6X2vqN`k79FdPp z?+~r^|Guu|`ROZo%UY`-JuO9JHE1|fM}DB??d*@@Uun>QR~XfvwR|FnOCwJLm;Vr# zi)yK5GSvB$vj}15E5)t-X8Ol20x0d7S>DT|Bp~tJlO{U7G!HIK{Mvo>@uTf1GZ*)F zlqZ`-PBAXm%-8K<0=Y^ zMDV$~z^TYUqxC505&b7?SD6=q`U+`KNaE;*_?Qe*h302E>M%HN21ukgRn>_V~{L(`Ha_aI%Qf zef6I?`>?9m2> z-NtF${%|$>y`cIkOv};Z5o0UkH=_{aZ@{XzIBYnm@XrQz9X|OnS09a}==G{xbsJt> z|Dc!IYqJ81r4_i+^%@mFja0fZ2~sh-?JG8Sc$LAdCZp8W>Yx6$HimZLGt36li>#&E zP}6%4qAW=@?^(o%PY8L^AGHRc^^^yUUH1@RZ-lG%HT;_CM! 
zITU9G_>s%9Gi`@iom5`%S66*G%B}*bKVXVHlcL>roMI&whz&Nr&6l(FuueQJGr81( z+e=-Yvpm9}UVWO^>F=u2Mt@-G-g;a0EAAWh)_&})Taf9O(@f;u`p^C^V}n``b|=`y zqM5?}W#PcYful|I8Ed}x{g!R`rTF%NB^a`)>1r6U{=4!%Q&JsbTC=8u9KD6VUJypc zdU=2QWU8=-0DEOo+24P#Uc(j@F*oFTN2%)3uL8st^|*o)9`Dl8cvSSj)_9}Rgjppf z$15!Y;(E-veiEAf8cv)@$kX$nl}AyX8UpRnXbWhmw*W&Rsg4)vJ*bsvdLI=Z+ll+xi- zs;<9TqqZB)3hPZT?h`fW1rMKcRva}P;2nuu6~Yx@yLHsIekgCO$ZhrNk-r(X7`o}` z=a!D3vwPWmb}Z)LBDMq*@3vQr!uHLzm-xViRc04|XE@IGcU{AspKR*u33&)l&re~g zgeZnilVeITmAnIlt&90+g@_IS0YfnmJw#N)NSqqOD@RuTcB>#@Rzs5dDH z1&=_aR7Mk4)^i0qBsreS{Z&D`u2vS!jnHp(=kB`7Au<1?4-Xmcx0ucRzCj_c*wsS3 zM5GSyMVB5$swb6gK5?8Hff9e%TA3X3ymVTc_dS1Kq3g`_ZW}CVF6O5*7X)dF=%D^_ zU)BqPcZ`Y{1ESVm=pdH-M$#foWu$H3swj?(v&^Lv=2!_ZHmXmq^iFz(;V$Vfq=qyE zUzqF_=^klU-ZOPT8gyd-7eZI=o5*IA9?z<9*om@fG9gSB-a?S`12Y3w%GGVM&Ow#e zm`Q#Tn0Z(x5nK5_$#n-!A%T%AKs0WdI!J~Xq;B4Oh5PlSpj269HF3S6tYyF?dhiR& zz!#Q_G46`VTET=FSUTM4Mq+3kR`a#g5uW9MOeKo)C#y8--aH;>6u(xyLo#_zj1moVG!1rcCWzxQOx7+k6;v-zjotpa86ZKSrBwPjao(ukq7$J&WX z@8X*Egf4bfro*9`~y@V%}Pea8QHx#av90fsp4aYB3YH1(t zPk}|`Lf9qd+En0#lz3^!AT;L5oTE~FfaAAIJ4jT+?Naw&jWzNF}(MNYZ9^ax6RipV5bH1c^^OB@#NaQbH>-c z>wLrQryG@HQt_pq?JwtI0iW@vYp2Zj14mJ&*Q5j0&^S5Q`YNLnbKSOEU9ra(f};$44VES^zv?3KB^jH3ZBuQ)4Xk2XK(+fWZKYhtrqi8Lj` zXh=dmV7Q;%*#M!~9ts3IEie!!r>uHNJ9 z8#nE+Qci)In&*&d2NAX)FYZxyr4txdyuPZ ztiuVej*HKpHXTguU%RYN`Z+xgdA7ZHyLIFcR)uF8v5r8Gw03{mQR03)-XZYalL|b9l=68Y9fbdNjmYRD9g&+bfH>q=^4|=#oO35{Li9z-wFH;neCko+2aBG+ zLdyes5)WHo`vw!xJw*rw6B%#O*%EBWpJd?|pl)-?#euSL*=ps1 zjb9pJ+)~f-pVfGp4Chl?ig{E6Dc@nh4tm0AUDo7NLx$7Gd}URuJoc|MyK#?OcN{b{ zKKVJ0&^E~OfH6gcF7D#|*7aq+OoS=T&RcHMNV`GJmQLmWb*()7dKE-OWoz@M-0-RM{Dn4VZL(#jXG2 zaQ|zDy2VQf#!{f}r))FFJ-YRnhT_L;y{_Wu#7|_cRAR$}D*nOL)G^XDIUWliw7gV4512amyRynvO1yH))DD}7H^7=tBR748wG0ry zk3VS8gnd5uyif93JN}2~i3m`oQHH0iEq|XpmZ^b}n%%5Qv)3cZ5?p zO7DUf2T*7?+cS3f9i+l2x6+_b8Db(6l+~2@zC_#vR_LW`R9tya5P`9T4ie>vhNt-c zuigVzZW7qm=sihcciW$(n5*i~@UhDOCBmuZCR5OqJXzP~;!Zc3z{y^^%At z^Rf4sr^w8W*ghW(;z;HVMkY>GT(wZJB&k}sPW&lF@-iMDdtL|+;Y_Uzj67%F zGL^~;J%5TCmsj|_^Tg}~e)nhDO|huL&X9v-xpSi1#kufBAM7Fd&XKu9MNvLFdCfC1 zKdDvkU;zk|fVU@cnU9wJqD{q%gR|EU$4vwD2hyG1mBa2#Br;s>AC?;rtZVlEq^tNZ z!La~;?z(oPQWI%!tEYlr3n&U{)}7I#sArv|{}ObM$0*^A>ZVqg^lbnEHu#4H_i))T zj!Jgi%45oV-9?kOw2tRR*Q;Lt2i8UzR!BW^bao{=9FWzp%An`Sj{lQuO=7vD{D{8s zKzN+VL9FXai;Z-FF1yAB!B20dIi8A<68{k@a`8 zmozI$9mnOtkonRH61>q8h{cl=x%_838#H~(_(K_mn*11c_BHMxG{ne_sgTXU-L@K@ zT_;I2DYblDn>ICDj-4n(PuZ&(?m!#Tpk6#YbN971qwym|(vr`)Ve4kH#a?f8cWY?B zh?X}86vaeS@9fCokg>OJ`}57Zpw?o4{uqDx8Uw4IL5) zS2Oup5byanR9SYmKQZJhTo6}{ygNHx9jr=RORvI9dHpY?4q%okUmp>BnB6@RL}(xJ z-|3BM2$rPU&YXK=uoe?(Do6YrV;!(;GG{DsjZ6Q_G)ft#xS#y~CwnVTdun1*EA*?t zIU-!!Aloqh7xs!W&-59gr_x1JUI-p8D0I>G136{=(_%QR?6d$T8QO1ngb%9C*+pPz zcKiQcvc(I^04$|NADj~OC>nEOhOeoM61U;6wa_IAud?qolhr>lFL@I8z7rbllQ1G# z>&#q_Na0|=rZeV=tk~0Cp)~pll$4yPZu1znY*Nb(hI5)pQ z)IQ{pV9-ji(u#1C8n~e8E8<%2xHO2iR`=fnm(m1;ToFs&)Ka5;{)p)?(j#T_%f&s0 zeKh}3$cFaiV2b|d-2`=$>&|&0h!92L$085xA!+RrBBMSw76pU<8z3qEkIHOE=rl== zq629_9UEo$m>}{Kso{8PG0~*(@#!$!)Ph1dg}@FVXomSeF*78f(>VZ)=XeC_lSnmh zuw~(Ia)*e7fccdS6U{fjU1>TmE;goW>mL`G{2p0%l4;*6Mk~C0CnDXS>F`S^XoD4XMGz5c-iJuiRM>um3X@@I2}fmT;DOJ| zc^aYUSyX6{BLM}o=9KbCkN3Z`X!*OykK`$W%h;0i2Fn4;EdXf{Z3ryVXagug#} ziR8J3EZIG^|3$1*nzEe#Dl~lcYfkustI9h{UP1rienfKS>G7?*$5dQEaKHkd0KDiiF&34A8QK9;pY7s^%41VJXe5Hmvwtnzf6 zPI-05%=qTjyn5>ORpE+7uGli=&)An*mPH&C+Bz>6YpzG#f4G(Id8X|^@i0zs&Vg|` z++y|g4MF+KF85=GkZgc4pyP+J4e*1`pPd`EQo{`mxq^()4I46S%`@UH2$xC_Jb}_H zZRbb1fgXA&9qLlelU|aK$VB^vLtC$_c2`WZ-q}89&f=jjtf(vuz3Qou0asINXQ*H{ zle5ZzR3p_|tiF~>YTCSR6I8o8)LR$(WgA+)pH&6 zn&w5`1{|51fWoA&W#pKr6732y}TlI^kR#prLG!w)QOL<_Ed+n5_Tb4XAV;d4) 
zAJ@kkB+3+4H!I3IsxialP2Un3*H3YHW;J9hByR+7XTDqCn$};SZ|Roifz0Vdj0Y*Z z&E@06^DFf)*h5bOA28n22;?7iBq6FAXMQem`{C-Y?9CkMOgX{ zoj{sSZaM2dy)c2z;wM}9ml?&``GSEhODV~;#tEnNoKIQNtz ziwk1Z0(KJ@7ZPXig4v^ zb`mdXysaQ&;RNZex^N>m-1+TqwpY@hVGP2La(a473LVRAUlFx-O87fsGvSMkB8$;s z%*vN$pZPRO%Lzg5fPS(RvulY%kq;28d86sYVbf~}=d{$USxa$CI`;tu)}gYk&q1Vn zD@-@K)S;497=>+}UH&vdd`yJ>Db))rN2UL(7%Oe3-Tj}6i0Ph}EEHL6*K)@RgO2=5 z16!CD#~!1GQyDhI)E|99w)<-UxW)9cKmH*fy9q=@Qwu3v^vk2=BWp0IHu_yo@rw&p z1h%1M9irFQ()Y>d0gm@$nr^}Wh|~{SJENu zCxBL;=ueyD_pZrz2xNHaoJet+Ew{B~cH}5d|`lw{0E00zNy;nd}3Gl``Z zc7{(@44gY}j8gg6)}%+;qH3S^zo8E2m)~aBuJm}4ipH`LfL7r~uYi-*e1(gs>R7&* z1Ci8YTW)a?u1UFX6kXuUK|LRH45;ca#1)H*7LQ+L@i*~`;CNTW=RC?7!?={fr;HGeYlMS6ry{v=*rt$Ehv6QYc* zHEF?$a~Sb>$yI(Ydynnj=$LlUG3!eMYR<-iIPHTx5&@Xu!>gvjzmnr8)1XVxm$Mcp&M566GZYcp zirF=TwmbE(4fRDLl)A*u8=a(Px`KNnX{zk0DMY!L1@zWD?)irTln=pLff!QZ!^6$| zo;3oZWPeEP^o~mtH&`!n#`jnToBP$p9Dm~9TW*jH<1WaFPU1CTZ@|gR z43;iAdk=7s9InZq?30h?A|m%J7)u}YaVNG0Wsf93P0K5O`c}gPY7D>{icwz~w)1`lxlRa- zX3>oH4g=qkVVQ%|RsDzM8`Fjkt}qWt`^!=6_qJ2kZg0f}ajprH)-8V+r~PT$U)!whgW+EU^%VNezDWtZCAmpkj_HZxI#xmX{6$c?e8z~mTj%% zqehP%tCFq0*m?Lz?w^}^3u`lYa*(?lzC)U^z~QW-2X|&pXaOU=KYutN&G)dvq3I_? zCdPzLW?3qXjWya7lemsM_&0t*8%_WssJc7ze$5gU>bT-pxJ>wYLF3u&i}HRJ1a(>O z@RcyQ#r3RCoG2hletcv};cUlt2ro{qTsM^6?W^IgMljc~cNgCNTd z6FdV3;uqF%fog#Pd)Hv-dDT?)&+8>15Yl*1hq&Ch&**yV^kvpA2+|dY>`N&cXbsz@ zFr2yz~y#nU@-MJ1e*dPzddRUUMS%QuTM|G_aDKDujn>ttNHK z)+Ys<65o3`GMyEQMf`6qKqw1PP?se=E?vC`qYI<&fpig+>hS)KHb%8(&kgr~mKxG% zqlm0KKHIAev!2|oi)m~gBMRBx0{>SHx%j6|nc6#bKcrvS#O53v?|moj))ur(n)FF$ zSwm&wdWYwnD$E6Mx~n@pRNrvEs{h>gCx2bdO~Bb=n~#{pZv3j>jQe#D-{AY-Wg7{} zFCO+86vS90(FzXIk6>e$q-eupTJWTT(yWWuSxJIYnO_yQU7P@>4Rw1BkmZ(u+{epJ z7(%bt-2B(=Z=Pg_U9WX`r1pN^dVzG=y6kYSq$r+Pgp@&sVp|2x&2OZ~&FtVG-k1Tl zDDJplVc@x=()YJY^K?apJ9&#~Ik+ZEt$48Osl}^G=KoN#n z9ov@8Fk=hTjW6dpUp`#4bI^5v+=(e+i>lTakS`B^VY&}OzSrSF_K~6T&O5us-Fw^r z>5DM(SK~Ve83++=Rb;ur8{2;Ky=ym~JOY=U3*=kVI**n#+2)bbtNEl_#v2|cr7avh zw(~(^4;xKXx#E>}2pS_9H)D_IqqZT5sxICrkZ}`DIEh~(rl=^yt-^&^)1^rv2K2^e}JdW3F zpsS`ENIA0h5rK3!6arxoFP$xC?;R*3clx@pTDE9`Y>BnNp*ddDh$(xzwWEVYK|oZU z7QJZ*qz@;QrYg<=3Bu7?>_hdVEIcox6U!bgs(O_Wa%fk0n1MF@O&?cPyqr+c^k)kW zaf3Uf(=O@8;Q@EK<=&~?)sD!>uG`C%+=s+3S~EQkCpu3HqrOs_^V@2 z=jz#$yMs}jU6HIw(0ro4i0K#{VX!RYcS&BUfB%HoXkG_o5M#<6hauDZx z0ugjqoYYC(Wxn@=qaOSS795mKXDqADPJ%Qj<RTV}&Gbq>tNF?Pfr!0h%{{H)3v9rE$ZU+TQIf*nc}o3hkDO zAUo!$no;-)&OP54Goti%^4~K1w#)d3-qqC(I?DWoF`A(=_kBF%KpC!HS%Ds=h)tVH zN`I+hrDw#%AMYG)Y{W!IH+y6HGeSNm*G5xZ^M&Q$3=st{mK;yMY)cc~{hMMl9 zp{j98;m#mNj-A$f{Q11__fd0kKyG@shEYzlKstPKTrr}zIQiEuv9>;|FIBUe_^z@P z>8DGNFGtJHE)eCLw)4}}Xc7(K(@*ff44jpV^&$r4IJ;GTP=ZR zmP9aK@y?zvzr&GEy|{qwf>zvcYI}?IO73nMmKR#2-hC`QO+sOMUP83l_JG%GB_c%4 zNYPBZ#7@wlt}JZc?(72|{o`s=(Ew z>qOnxhWZnO%Mpf^?;NxI?!VP{g1#Y5wv5NG12O{7Bqvu6pYwP z;uuM2Xsm`YOi(Mkvs+6UokTR4J4=!qQynSu;)xvM6kWY$D@`f-u_#9VX<*NsX7k{c ziUECa_o7qzrqG+G2Iu_ut1`P7M*XS;Ioe+-GCpR(^N%ge9zG58gZF-kFfaB+ecH>C z1Y4>~h9R}I)e+w@z^->P$V1>VmliI+6qTiXbDnCFak~5%F?GnJ0e-Fp(Or581Pv33 zuT~);cv&8TUl7~Y)v@!b2cVm{a+#@4LwIOVQhxE|awda8ZV_ngS|%NH%@Xb0qdzEIBWKFkGQO4#8rZx=o<)f%yE8$;7N+ShFWGr4uYX$W9D7Se6S z?~s027^eJDPKodJ2``7>?wq%&qcpuNylJ4Iqd$%?yvlxAU;dMwm^PF65Qk$wS$R3G z!L~aNCSVg8uNTJ<fv%Z^DLjV#L)0Qfp`9%8cSWj1;(1lZKXr!w>Pld+D># zpaiG&Cq1NjM~qUg>PrX69ggrzN&a06KuH@%Cnt6N^YOQkaHzqSEnbPP+Cb1Nm1ov# z1Uy0S^gOO4kJIVvhQo@Iaa`Xs)vt(^pT?(nMKbZCxs^2zM}7L5n==Cz8#q%x2;2wc zU^8xZAU$^-5YM{5Pl8eRUiQ;vu$`wkRS2a1a+)hwE9R0yE8ix1{SJ5ASg!&O)&AEw zSkH15TiOKj<&9-&n~r>Vr`1v7;xWg~>C!=cmFMBC)}maLQrt-FQ?#pdHSGu|zq9i#NllZ45^9zXH>!o->zQ77AyCQF&r{_0u`%RK0n*ji+gX!5O`y_G}6y`L4{B$b$4lg 
zAKYgZ-fxw03TTejz*^buL^5lSg*bX&&KXApy_sj!R!O}&h99h|+;*AXsizZ?DpYss zeDNlz|KZpF>&*~~Wm(PXA6E6BcyyA9HVYoRrH`q}__K2B^bDVl6YzKFH6uj1`N@gP zRL5v#$DztBQl|&%+ti7!r~mP_8(rQo`p?0znv)lc zUyn>?m(^#=`ggh2Y-{jR%)XLndrBSwv2wP3cR;Z;K^*Aa?dz@GMn%gLUzI zQpd@;%YLlt{NmjS#pOyy$IkB-so#2Y_I6-A>*4l^8eUErx5V4D`t+>OCkUMTz*@?_*tbZC)li~(yiUguu6hn- zLLXE0x%U%;gT(K{H;JyhgSUwndd4r)*Owm*L(IDe_+9Cji~DG4DQ1dUXcBBi`9@0$ z^+des&~(%MX@PCbSg8aLHy}oSQ=wS!U)s1|$Ev8gdcppO;*DwXORw_c{(2ku6JVae zdE_5|MaVxgEw&_qNF){#HJyy;OMbi63{6o?t4=@q!sK@G0spo%p_nDj{x5_z(_m8~ z{V6i4QY%2s@{X=$57WD;j@>CRrkskZ!c0@Za1_cx_jSrXM%zd|W$9-6 z@W58;-Iq6Nu4mo#5Zn*v13@^?r^niS&hEE_v-vPJRMA!i!;_8wk~FK3pkkhQ=<)GG zi(r0PchycEl5Ww(LMt}*$K5-T{WOo0<)WWukB(y13qtWE$JC=sa*|oG zRxF4{B>hI$rk9U+@F%}-OFE7JZ}2EYicb2^-->Df=1oe0}1=of*R@a%}Ha~M{NU1e73oc*<^d5f>(*7RlG!N zG-jr<=Y!4Vkhl#bHZr*y?buNa~;tPqI^qaH6zCrJ1eDAL_g%Ghng;jOFf#Nkv*A+I`872eM?*9iq46|)lpK# zUa)+*L8*mMt+FXU~BO9|6QXQt!~P@SIH|bPs62! z;@lV*eoE^wV}?-=t&h25@4IyxWhXj#6QFtVhBoI%=5RdN;NY=d4^WV3ef9hHoe4I< zrIYF|by&TPRBx5>Xa1uFP8_&p#nM!b6zxM(RV+t|d5=>dg6Y0nkJg5UD^IDQl#|uw zi4@XAi!#xzQ%!lrC#1Yd>fix83_EjOG{X<1#v_!CrpcK#$E^FR@3U;V2PFB5wh5$k z3Zq)#h;WV5cByK?ax^Ru6Oz}*`H8J5=%CFJU9&JJ{$BZ%K!xhvQW9%T{!js<=1j{k^6aE$JP+%9B* zH%t-Ts#`^6i`MJCZQ;`rGez7tPo7H=Q>u?>s5YSCxftyJ;o|!Fr(G|g2oPumW$mqdNYcKIv$ zJ;@DF^Jqh--)-X{Cb6|X2){C>Aimr7WrCHKh*HVoeZ-Ikic$%v9xc7Jc5BLwbG>xm z$w7yJCe?kcB{{KtUJOH3yF2QN4n8N2?~MP(&dm2=>(`)wE#dSfa?syBORhV~8( zuCiY8*$xsrw%*{aXTDqxdQK9E%f5X1&0QWw<^9Uc9dSX$YO6HCMdA0VsBoUZQMzmT zZpPNZ-C%_^J3^pNM@{X^z}(=Q&P+!w)*SXCbm(P!>BA!)C8->NTq&0Kb|-9mXjMzN zSp9Oq*p{!X>TS$?$AWX(1>fwlUu=@mdN=NT=$n1|_(~tNW7BvXogRwW>!x z;dr0B&BzH?9`(3t^7^Ek~%1TpeDM^5QJT>fR3fNYbj z`BuLFYo;>Hf>iD5Htr@j?l4nOu%wXGom&%cW44PId|@vET8@K(Rm$vc$#$%$+l^SG zVqybCxf80-G5amUS_3T1TD4*c@5-U(ENA}caC1)M$v{3fJq}+NzO?qI9+hXc~8i=^cyB5*@g!!RM z;Pt$%`Kt~4wcwt#Gnyh-F}*+k>!ZuU9RQZ-b)((!?nK3riU zygWL|h>X9{N5^tli@$BaIS?LvU$Qdm;Hv9lsmxL8RhQDbY=V>dAypvq;_>YSBTwVZ zVLe7iT2$Om>8ggrYD@&YkU1pm6F7(lOkjJN7S#@@cnu!uk`_v0N zrbv!9_q%n~%pB`&md{^w{$C@yftK(G{CLo7)h61=W(*Zg_sxBpB>FEh+j27Z<5xWpWM3d zyWYfVxr}yuT-py)UVT56=~7A9G|%};MS$o>L+M+>Dg8$8l#Vm9a%)v-%QOqOr#<73 z<ZtCQdrP{>@%Jg`Pa#(QjQ12bou-Y!CV=5WCW_IddWh5V7ho=?6>tPta*~LG2EFdeCO#ti?b6+0qPkSw?uB9)=~Pl@VX0FAEx#70^9(WChe z>xBl!nt%BVg+Tdw;4LU{c@k}nceeL}ciDO#8dONVMQ9m#|u(nb)DZe05yYj>*1AAxww?v?S z=^^2xkBhs0n5&|>XNq*P4a27HeVZcn9qk_N6_rCpT%Srt5a>jQiZgGrJ=gqJFI+0$ z{R%agk)cot54tq3X+5F$!>I#HJgN8quZ(8*cB$>ACtd<)Qi1Lo-uY zB&>}&CaVxj2@*T;6!+rN!o*d9QPe#Y)n64o zwnj64T*3C=zzdGdCsX_5w{x_buUE2(CU*Hpa%{~3fzy+@g4+3@--KQ0ScCgsg1Utk{XMI&1&sLY4jrYnSnWc}xoN2?KH$Uy%fMm-nb&Dq zg*65x6}HXFFi*91k_>uyDp6(ZGL0dk)kgB%DHZ9KOIExR?!Gmw+p&7p7am>JMc6=0 z4-^1tHsMs|6>V?;fXek-Zr*Ksjx|b$%QF+fWx5bha%9jyS2Nu! 
zO)f-KGys?(mwPNCCtjB027^B=aT^ay0?2|i zho0m@XWAoTEO#-`KdL6dW}0jI`JZzK8B66|E+S5Vij#tBBf--ji>|&g%&YeyG4X9w z_Sur=^X*UIr>R>Ovf5?V+!hpJ(9I!0MAvfiQORyU$!W^_vMkSb8>kqa&I_A3F&3pF z)a|tkPCqQHhz{qP$G#YH_I?>N`MccJaF5vpOZR|Lao6ka@_(7L96MH=VGh9nl3jhMg=X?#sG2`Y#t%O&+?xiGO3KVvVoyzopVhy{0K^9CygZhWT7zBb- zVz)IaVplaRE4?6PZShe!R<~~=h_~LC)x;en#lS=XR%h1jw(Ow}R?>X0?7F;uOQn!j zO$rFZ&F&;Oh!X&%x5}>SBi2(}t2%w{-aR|)GQmmX@vL51!)-KRY(RPivs|8D;O|qL zRU-@v*21O{hf3pGJ=m*Fw{g!~jf-5Z^8L~*8MIawN3sW1XSsFSRof{yKP1KBD2H{) z%a4?h#w(nBjk0?lR>Rg%EL*?KDAn`~mvk*0|2p3*hfwg~78Lu6azxiwn}~D z!*B?9PVDwGt-5s}=lWnNtneSa^&QN!e8xAEFh}cr0Zi4z-A=sC0xj45v+ltYFW%1f z*+?si|6?Z~caNFu(^`qImY|^E8d>Uza-oi;OqYiA6$pG$ud!WZ%YU*46alOU<|zoX zfWe!e4J;AcfB38uZ^b%Utty!PF4LErKJpRK<}VENB!&9Zadc#0fX-k^&_q5Vy|-h$ zM?FAyb=prJ4Hbc8VqXV(Fg+uk(k)V2W0GE^?6@4j1z1g^M`3J$XS#^csel4ZDlINd zEd7?{U0w1O_Uu2a%O_XIF@_VJc=G$^5X^`~xrw~Me3W3V)J5bIlQX*0q^?D|l|Qsx z9erf|ITVC+Uf29}E{93D#Y>63A2ewLLrh}1_9JgC!)6~Eux;m;2pkq4$h@ERIA2<7 zA^^RdC7fY;S3O0flGqxG5QosOtB0dl`)J7V5N@QXqSdxXhVo9J?r+hw=AETDnN$tF zpJ=LVAH6Lrmp3?UcGmI3#w1&OWPEa<0*nwRF#n%-yf>Nd%jy#{pBv@jtFD1}6ZYz^r z`9gJNw-hk#ZAKjS7ygNGTQAQD>Z@MgdIfsrD~krzv# z*csD}`Gqj?cz*ie$WXwd1}Q-}yyj)pXhDyuv0s?-tC9*YbtcMs{zY2|QJ967dxhXT zSv>>(j-_OOxtQjlqkaEzr^S@*$oGLIAfvoez{9h`K~C~^C#@I$r;-U(%l8PvQQxF= z;mgWOwc8`dw9cQFqTCLIPjal!w|vXi$vf%-vp1;4EagOL?Cpwe)9gPnXg_xA5{xG% zuW0qs#rIyGk{)ZhN{XZ3H2p^FT~ahI{`QHGM{8>FI->dg#N{jFuiv+#i0}wNgZedD zDH^JQ8hJTxHE$7y^#L`QQH2eY{?{9TCKJ54`_{w@YJKtcWf5!FZhHmUF^*o~i5fyU z{2>z#yU;ly$?px+a4~@rhs<{YdkdoH9=?*atZ&egzk?r-Y|olD-o_ng9PbD{bo1Zx z5F!}FOwdy;^g<|kql5O19t&1u|CAz2wQc-Gi{k)Kw&9$#>k9?@!ozGeh3+iPd5g}b zwTY5;y72OMfsht2L{sx`v59Oe%WqELM^-K8PQ=b-<35X#nr{5Dmi)Wf=0Zz^``9wE zr(bxg=J)&UhihEb{G=&hup+@pM0g{|Jl>kY)y*#ebV_}{A$W9iPUw9wpf4fOpVmUB zDK87VL!(9G%IAAv;^@)XOtKev^0nuQ2XM~rF%pv+6OhdupXsX^=bUezNFg(nQ|}H~ z7fSrbcA1!ii+=AgpMvEG)M5WRU`;faR~0+Ve@zxyq9oTCI@4t43A7jpKj>n5?AlqHOs3vF-p@tMgm7 zcv2zt6{E*HZ9>c9Z0ZvC|Jn92dPf!oHpidU^4Kzt=YB9y_HXP(Zj zN^lnHFiPss+e1VBVSS_*^|U|T6)A$ckrQU*??7~Fk74ThrFyE(S>~~W*78uuJ>j0x zU<9Q(4B_LPzSgennyk<4vmLQ>I2Z5QC-QIV>I5GJy<#}1v?Xg}N>T6Kd{^|bL5`NM z;L_4vIS7Gqv#PO&z^ih!aM;R#EAasMa^sO}Ij~br3rczrA z6~oZ4`7k{cQMowHvLc;v4v#5dsMRDS!JHEst>{hYZ5WDRUP->s#9T>(Xd$x-N|^*M z+nwy;cINpu%XUZBXl?EXWe*h2w+`D^2#x*21Np2_ZC+w7sf)5*;%hA_-s7&Wp6K*c z<-9^&DZyP~g5F}Uee z00ylUOAZo9V_mVo0V^}(c*1HDbR%X9#>?F8|deed$_w&lA9A6Y2?dKGg_7;Tl(0J@ zn*syQUijk>Y{7km<~p{u;pRZRp&!tO#7{T;Z_u0lEv;Xwd)%Av*Mn&wCRut;*k(6&lb*w!lhsaEMAU^#%!;w(dyZc(}Jz(X@I+?A91_P}f>`{P2{ zb7a{0YOVg!CFzX1bg?>5`UZdN7|+O7teBR%tU4L4>-|Xw75i+Op<4wBz`&*@$*JyKG z`J6uK;%91iSBjjs-t38I)gMJKG>X$--6qbgRrbKPYFmg5^Dp)Pu$evP(Xw}lZ#P-+ zmtw#ZB7Lb!y%Rh#Zo_r98CRxKx^uNp!_@lVyly1J8tzxnvOPP;w`k3Yem3+=<7uAn zqSZU(vT?y>kKRyETf2A2MHhTofg$l{md`6Zpw~0ydUt1;Lf4A!%ZN^2?30oRE`8?F z@q6~_9HJe$s_nAqtAkE_Lh)FcvB67=0mqT^*+c|1ZI?DCaYVEwl?j%F5&!xL2cX{Q! 
zHCD}LymGM=lm1RH{X#dLog_x&9A&HumQtC2MXeOFe8{JZ4^vCOk}EBU#q9^wnpgWF zq=S5;Y{@byYDbL;377jARkd-pvP^VTjx~Q7I+hRz8$U)wCu#~cx?podt4WQq8G%^< zF=jR7575!;UmyuaVR^qbUAluWw>^K>#i7h3TvGK!Yg;Zh50`?YjqeBC4>)BnmDG8Q zTt2WMkaa^-a-j>&nQD2dw9t0BeXPh888Ds$Y^OGJni)2imiJcgik)NgHTZBZZQT9~kQX8rJ>kJ~|ei zD%;|Ez3(<^t8q0vnlLy>z)KM|a~?eCzUWdFK27V~ezD=M+mY$W=V5jDhS*p!9Y5Q6 zMu|DABHxZDRa(6%?pl1^pN`5#diP`cMayroqJWtC6tSr3*KB)1vQK!Q{y4$|JO9BpMa+j~eNvL8< zxXQA5F`RMF9TlsW1UBQ>T3j0tn?M{pU$4(8|A4KV6Jc=QU!&k%TSaO3g7~FP*WvsA z*{tRf%qdxNK99hrB=_`KyB@Wb_(-V=AuRf@4mVWU-;~!~_T8~d5II$rN zd%)JerrE7mZ%p{g7}k*@9lcl$c*}k_vwQ)pnqv$2A9H^arM_?hfoq<7>A)A8Q@Mnb z#mHuNOhQ`7lkmHG6-5qTP6r1+3xZ&?jpnTF)Xt?lIziq+t~|L!Gw)$VwMED7G;bW3 zW9*}H+Csa%l8oUt6SDT`&+ny^N?~j5p;kdPmYsOIO8>0gB-)I@u{-7WQ_pdGcvq=i zYP!sSDGaNPrlrO1H$3+M#v(I$@-2~GO)(TEce{VY(trFTxUy=FH=v1K8QX<sQ&#_kq0-UMVq*EsP*kTA$!7ElIW;_Z9%<0fJB=UWCT%FB3RGFxiu{2<)wR=S+RA zGRtA8z0^GiDQdWQ80a3paQ=`*#xd&QAKZ~bcsHnD8^~MrW)KJvw{%~moV;x7z8Azd z)ZjmzuW)Ex94DFFHgbxDXckOpuw8FDdek-sZ8Elo+25lhJO6M*tqROo@q4K1Nz$12 zIen(cGv}ast-KEI&gHgr!b&Jlt#^hq40fG8*qq;xpLi9I8z75)uOZlW zAsi?A@r)W;Dt21j^!fKPCn8w%RVky^$f{LX)A7J>D;BX$EcP+s;6hzz(w%zI*0FUA zRr_^6Zxm-P{q1Lw{AvWt(lb6Ce&dAA_kHRY)}yyTLd!y~)7jU2)z-9RcglZjCSIp$ z1tpn9k}WD0K*`&=n`=HD0+LPy-BIJY1a}STcCy~dAhWFzPT;RfS`}1*qQcMceFIE8 zzBHEy&4pXw?wx52)t`~^gI9x_E=Pl;_0C&}47qDTJ>;N_#1G2#h+p`)PfbxZueXcc zU$B+=6{b zN91zenKzUG9VNT6{Wvt-eO$54xuyx(n=ZVM-ZU5aYPTB!0H4%fa>dY#`bN8q6ezkK zK{n3^_76%E7)K>ht(0L>m%O1@%}~La#{GraV3`GOs*4lTqB^&XfXc>Xf|+H!@*?GT zSYhH*LTn6$7EVZ$0_vmp(rmqX{346!+MUD-UFMT5im<_pRz6P3J#gdb!i9+pNEK`y zp!t3G>zzjVoz=Rg#e*BksE*;6mBP9wBf6Jc{XuouzYnas)RC}z*sf=(l9hOOzc6i| zz+ONMEDCDu1}m$Z)1;p06-%WMO<1tl za8~|=MkVO-mCl15T54dp!QZFv@`bh^(UK*bPY=){OTQf13|XGfI%unw#+KqHQ;DZ_ z(dW(ei?c9p&Fr(LmLb`Y;rHH!A<_K`Vd`U zAAiw*>eR9f4kfTLqb~NzAwPuXgL*!<1Ojtlv$h)gu(55bS@+DH>%<>s>q4aE&+c?IL6Kf=jLyM4%HN!dFItw8S9>SWo; z&f%C#E#MZgeL_@-31vel8U_ zTr(r^IspQkm2}T|H#_anY;p3owOPduzZ~;3gfO}j#QXEE`?kgZ`{BV+Wprmywp166 zvIZE>vo;_Aj4^4yH?8IIQooK~Ac`uAA(9{W2vp>wiRli&#B3lb`MFgl6sRe-sdR3J zrvyZPZm;UzXD6C`unC#rDQ%gZuh>N%Wq1y#q0;TYyM3we{+b!6kkR|?>&)TD@1GDD zjVtQRq%#NfJ6iSZep2j|3jNj;JN);qBgtviHW660HqA8=GgiLwhS71wWr!L8vy8qhW&iQQ?0v%yhYUB)mQ`1rsGZs*3T2?gGwQqgkLBlx zi;C>;f)HEm!oK~L0DC%*$^RkzB?I@Yb9#7^1X!65w$97`F-u`{8$Zy&pWC;X`*kzP zfIeG*K6xpXd;v&LR)p#cm<_D`O3i3%*Pn37()|(%wYu;9W3&fdtGi2<*y&|U$WwEGy8;*$Cq44L+L;J z;V6Hm?FSwN?MS1Gg-Zkf#v7i+)1Cev6*WyuCel>^b}v0>@1s03b98 zF?1NBasOK6oL4vR>OcwID{PT3fTSqnYFc5S8;#-vO^^Kt=S#rZ`0I5}pC0%bjZB0h zzoog>)BCsSbYY2Kl&DL83N6++U<(s&JJJa*@K*E5B%0Vf*-Lf2-$r;Uj>wEM7CJ{!`eR=2EnfHgKsnX;16$^M&5KAE>JB>pEp4oO2-AvAUqU0r@)Kxg3EkKwT_PpRhaa|$r428i~7E8)5-EZ&L)yV8Pv^NP3jBUcFY^CxS8+K3Yg zjQ(a0?Mtb(fTO$eQk|lhXKHgXm7T)zL@iart5#mAmNY@0V)61PNXhfTYN#Jp@AYgT z+v?#UwCLo_|5CC$I=C~kHb)*p;4NFY%L0stUr9LVXHknM=gpRg7=v2pu(>HxmF4X@ zzPNCi(D)U9bOqO_`;6eaw5xdv&0R zeEioKx77$DVc3xW%`U<0t>bd4>Z1Es00`9?(|CC&L~{*y_S)v3B;5bkrI zmJ9*`+#QK#Ot#ew`tn)#Ctye$+71^*gpfXj2N}imMr7vam-TD{&?OLoaps(U35Ya9 zjqc16SKMKu@y}8n<5!j}F!d=H60rF;hV>^%SxMt907DJjVj9OW%4$GX%eUsm1K#xcPUs2iV08 z+#$dEKjI%u3;ZA)gb7Ie|IbZ?6_jY8&pD6@$%3h$TEGV;MQ<^aiBP}R2A(${e0cTX zhanC;jr;dB-`@awAlRl0NBj$nLMEaZLqzG1{3j!r7CJ@h`(5KO>=oawEObf(I<*i> zI{I$MG63^41^n?M9Z3wOW06+1Z@w7SfvTkE% zV#?A{bv+JzJmYIDgVb%$dq*B0z%_W4dtMLa!^BClq4Nd z4C$Gup*8gz>ctQcb}mEZCMTfR(f+BX#14rzn7`6+qc^;jMw>=<+lQc4|3WOsez%^c~#_>cYah8^o0o z*)GGYeaeG?pw)~vPj|1@09~>SY!EWgOPpCKfw-XyfuGFqH}H|Ud}n388p+lXM-Yq?(DrCdA+sns)wu?qcdJZmpW_vwEWBwzi} zIQ$HS;Ql3X`3ZhfRO}(66_rE}fjn*pf3e_}zEFkPiikYhb-$PAF7AdY)yG6heb(n2 zl>$FOXBJNGZ>U1of2{`@7R}t>+BDS;?zhRpuB~8>h|C!HSm8odGjzTwHcigeT;XQ0 
z*~VurWDCTG-(={pYDPOxu%l;nv7E-Rbk0`UkAB?}pd?!-h4*wQ`R9xH!zXgu!0Ngz zR6rSqDjdMsAe@QRZQ9o3su7-i>d2w9X*+!I^WPV2`;h}VKumf4NR)faNeAVCBWP3A zI4tV|Mxlou^QbIw5g%`}%cS}iE2cBa_@8F}9}fwxY9^*?X4&`F=X^c1`#MF!XBk&+ zZUdQIdC;(e5S|(u?Gw=E52C}75d$BOX`~00zqf5rptV>;9sm3vdX)+pqG0#y3Wx|l z|LPmiAHFd9@lW@q-~ZEnMs)`AlFM&%UUfQ4*#JQXlxD6GGE`2c@v z4*L+mO9FFDQve~_&|uDp&BZbP&x;LE$oqAxgQ^}8qoO*Kwk4IdAqr1TEl=cS458&( zsC=l8@?hsPa!i;o2Bx*{D9e<=PX_p#W~VJhQZ%?2Z>d6G zhJ*V00p8-H@K(mA`5*3zbGcm=&JgTL{ruq5SUnTifio!k39rwI|4K38WTPH60ksP0 z!sngFp;Qvr??K;qIJUF6nft_C4}g(&;CB%U+T_E@^HsOdC8d@l5a!)u$YL)+RzU;Z%snjpZp zs`g-l^R^@9QcqPlFosUOf-3ncfPj0V(sX_x=$XCV9gS7kENXVjRyY97rXvCR$j)=( zL7Rr1Hgc_M#z~JvEXz*!)&V*dAz8o&ox*}neVzT5OAXDXgXSuQY+^&FUeBgCQbX}* zp{%hA?hpkGrN$GD*;6+q8r&H_p#V&MXL_F*x00EK%xcDI4ex^L6@S~br8&?jDVj%N z^~jCe5%Jm%Xf$y$;af=G7g6QGT0FxqdvXF^EpNE6HWe2h+K?m*>NL`B1(?Y2+>6xq6_)jjd9O-=`c*C&t}{T)9x{+p%4`*{n!P=$dlH_r(1 zNap{|1t>h+wbzdDgtzGW+r|AhAp>GZ+H}u)Rx#6nJN7|sjR|lOLu!a^sG%lwP)0?` z>g)S?-Zp?>Byc5EkVt&@~?tJ*H`0hsIY7KwXYe?Gxz2&$AB3(&a>Z#+;ALg z_)w+7nT8+WW9>c^vaHaV1~`0Fvrw(50?54@&9pq&6Yeiw-T3vDz7WxwdG#RCYcHhV zhQ(_Aw8}U@LI|oE%)4{%<6rP~It`RWUeeN$-F)X2j724=P&XTsJh2W9GT!E1B6$0H z#Kv-VyS~I8VXPp$jATrsw*7~Kd0U`l0k#$kaS=o#3nHm1i3|5qh&O3Li@hACbYpn^ zx&dGhmW=Gf-CBM6TWdv0n7kx(8Y><1Gth4>2-a&3YkcMB%h)DWLh5e)YUOoPX?4xC zT_SfISE%y6^G8kttp1$*U=lfaXFZFMS8E69ZaDaeRgSpacRm-kVfFZ$)~4|1ZFaMp zNIurpGnP#)(N#0If6*kt$5R^KNJp<3o%s0>=H6V{TTb(&T_QwP`^&>M{b{)LhYX@aXZ8)v4Q|0`CHrzHV*mX-! zxS|Ln4Y2Ys<3`0y>e6<&Zq$Zu1dj)u!WJsxVZ0>h0gX+zdBCbCum8RWsxeK;NYWG? z9)4Bz_SG%e)p7h8@bMCV%WKU#fT0K>{fgSX>L*(`sXBe&Ccb4{=BI_p?zG%6otHsJ+#_1CX7vtoF##5k$xX+2!H>Jxfk z47YCa<|0fyQ^uLG#T8zX{mGk=*IT`NhqUGMym}k-dxPCRX*fAcOG!VMdtX$$Cc`lQyJ0PnU4dC_>0@#EStjr)FMes>`Hx; z_ASrXbj^Y1&UTVe!0nQbx_`S)_FM}%*7fRie*8sL<8uuATHf!x|5&ChB?)!J%9e$$ zi(^+Jwnwqwx*h`{%M!%4AK~00bQSGN-#HXImwzVgkqBOdUWLUJI&@|utK?J;hr7E%ze zEKDy#gHPsqn!ga59DXRPa4Vu^uJ6`?Jl_|_%pn^_(%HmdxYI^VVU?(l52&R6zTJHrmmdErMpr}cYe`Nc@b(ASd4Su5y)L4ELSkMyG+&<@` zbNrE=@Fz$E?~mpl5g(@+EHRD{705|GW=z0aS^$CDsOJ9ip2x{thhxmU$fc4XUW4Nj z`*U8i*?9?J`)x|7&I*9NRo98+FBNr-aJ`Z$vJOano$y`WyOpfUm)=U>bj3tngu!)g zMLiOPBi`VJkfZ-6m<%b;(Af9S&a9rVXQpypuQ1*$iqby?&k2nuu6^kj>s=YRrHltf zQ))w5oCyT@>tVcIV(1SduC`SFb7$Gtk=~~P+Tvg1*Sl%bR|l({0*LNh!VzLiNls}8 zwKuf1t&YzzhYy`^(~6o3p2||Q);HLQo!6dO=3D&(Omo?tjl5Jj1c$@{bNB?HCaBD{Vts(yp7)#t892A_t zELgW(F4u1R9Z6LV;$DAF#?N^#pz?Lo#Zw+t6_pk7Z5W2?NJP<8H~+TwC7D6u0(lW_ zZ_a;|+-ZuJK(C%pfNH+SdV)6a969cEHeFzo_Xn!6>4WL<4Zw7gp%tF^Y*Zh)G6{g; zLy$uRO!z?etEJzRa9h4VakRNl6GTUj9IeLYITEV+eZBX0+5afrvvP!<3eH`F7~Yda`DngWmYuqS2818f{i6Bee=EM8*XHn` zj+!NQYkqRsY~dyg*=&%n+RP_Pe#rL@*ap#>GyH1{?C?9DOkOs>hjxdKQis31Od&RVALu$fsndlN(53QOzxiu-!ctT3*GkW+>1$!#H6AfssLPtS zAhXbdH2SWZdVwqT*G%5XCG*X{;qU+Y4Zlx+QEwQ+5#uOI|L5lS?RV-wD{kzPDe-`C zc`AD0+}_ez^BYUN7hh6Nz#X(ZGVfnQ%129%;WxJF$7V63`m}V8QwE0ieQG>8%2qx0 zEFqv*2rnIR$c^-!x<}XNBS+X4?Ydw7Quh~1$CG!c=r{)RSIV^KM+fW4D)pVZUU6B) z(cQh=A?Odu9drSXqJ0t|W64_DKmR;ky?JHdyl>Kk#c}S&no3}wgu?HI=C_vZ{^yU< zBos{4s~I1wzoM#E=Pz2BJj&X`;lwx~oF)sju*QSeo;qBgK@Q=wd*l?YM?u;YP8Q1; zLvp#JZTXw+A5F$24dKnUZ&<_@bS9KK;T`d3MLJ*wi@gN$6+Q*}e6{|jXlzh@$+%;Z~7JA@PK{7lkz9Q93pZC>P- z@$kh?Zx1z$7LB4|Y?bp$NB(VZbVE2C&if}KDMfu?1n`-9@v-L+P7-n>bffZ9Onfzw5P9%jGRl7$`jgpqx8pr2XUQfDuxgN^d)7Da@+pO7DeHmYh#T3I0UPHcy5fUQ2 zqx!Lx%y;UJnow}%y149DYcd3aVFd*llgs}XNo^h7x0G9RJDGqp{v6t5{T!`y&z5#F z(kblGZ`3Ux8>2TERUc1xbQS;W)2t(K$qo*h38*gpNlvx_^1+hvr*~Zg{5ZDD@E;ug zW^^rKHV)7OYnktsVYY#Csl>+|QeCdpG&Duim&(>Yi)t2;8Cs=H$3^w>4$tS?iTWyQ zW!H-~(G}d$Jw(;D1^&Zab8j?ej2|Q$#LZlw8tY@A{y4X*SJ?46=#6H*U+46)#P`1_ zpOT+eKA?IUWE1qcmf!QWDv>+nt&+P z&tJ#&$xYD@QhEBY3t$sp229>`d}#gs&eszNtx+GIK*$L%6U+%D2tR9HVubPD?C=4# 
zBFc96P%(WP7JS~KKJ5n24`EnwxP-zb0XGs=5UDTJST$YFtvSb7ZYhwcuP=~Dq`-&@ zNa>j4ofC#5woQ^@Nj4dy2MAKIrb|+clBNFwzxO2YH88v4y@os*Z$IWg_7-?kmuA?e z>x#ykZZWRaZz0$*3C!m1g!dt^PxU9-$rY7$JtY^#UA-Um0K|uLb#gD_V9eDnN&bE_ zWzmitJyE^JpbUD+TSm&(nm?4R4*O@699m_xG@mVF7!b~cJi*3d zaBU&d)BqQ|M~84Gp;#BUk}TgFUnPhJ(&Zi4svgfV7itY zOM|6{D2DH9(S=`P}x3nUNW>t`^FW;aZtu>T0WrB*4Vl_+aaWJa*%??uv5bW2D z1M~c7d^M6mJS z;=lQ@?Gg~+x)o2~CrU9Ns@0wLfe~x0!;|17*$^vHKfT-dipG6fp$4T}*-vQ2R;P2V z$3u2mmKTnqR0qGYO^5?N1>%P7E_R)5&zBj?4S)w) z&(0(`vr<2rW{;wVFf3u_r>SR}Gf0@WtiO*{8T^~SZm_z$H@7uF-7^8-Q!d)0tWl-(;bWN*so&GIbhd)A#MIsVwzOH;>ivd&O5vSe87s_Nj+ z5X%Tbb=_wG?V!3VY-uK77PxL@IlKNb5cDD;sJKw!r!?3ejf`*{k8P>g0v zX1#=vf7OjbFDcQFBU#5*#eB}D)*qCwuTX7Bor+j8p5_jm+H_u~S!_~{h>ALDp8gxs z@w^7BxXEa`nR8)QJ$oUOQetTh2U0dsrjIO~PBmpKimqh_+Sb4eA_|#*5kDO&^cB(E zru<^{S_OlN3$8kwTU2VBu95+}b*kSFU9`qW@Yw5D^|XlPz7r!^^YzZv4OPu+AR&bL zrBA3$l=$GGKYnMhlWvysE{PYff1T=a){6G@-56;Ucscj!Wv|j@4Xu20v$?~ub$Zg* zro{52iq);8COOetH)&z<=_5P(9#26n6+TJ>lXd2O*KrCe_t=JhNR^Jfh?#O_@30YkB+@QpFuGX}NpBZG3qde21vfhZX5M(`u#G zWO()xE6!Smc0W2Tu5{FKQSM%?Sc>CCNa403@$D$eIob??o#n8M@)PRES^D|be6TG0 z0qlVNY%^rHIv$7EvbH`Pq;~c^?ImvduR7IP(@)Qz!-zPm;ZDkkq(9Tecl~?!w6N9~ zXGM*x|EkwN_DUP8z+(V+=fy|EQ@wdtoFeCBJni@#;Ts3%ztbx?#H3%v7F+FnC{ zOGJ`NWtog35se8gCHnW_;>sEa1{UEg0-C-_{qwnEbJzh2;`*>(Awi&Vn1IzAX%lL8 z9%uM=X9k7VMmE379sR96O1C!Oy9(4FnO9jwi)TK40Ow&k_)$;jiROYS%$R~*op?A2J~OEX{i9DqS^#Vp+VA=R>zmGh41mprc;zy@9DKy zSp3^@lz{`*BVcDjKMwosYI}O$%9fv3fi$}iH2%hvUOl1J636Rxev(X=WKCVFt)o)7 z^qA^Z%XsL=0gYul%0eO%@i(vd!TKudkDuG5 z3W;GY$#&qB3{9n(IOx>=& zNe~OO<@}vvBt;fjuemu@i?t}scB0I4o~x`geK=Qv!R$re6Yf<6{y8Qjr|Hs9uKfUekTy6f{OXK-JyMN>?O$bik8f-es5%UAsG zkk(Y|_Q-NgH3G$)l0IKw+HZ59OYsuB^M8BcV2y9@uQ7p}VdQZm}9)A9)(<18SX zbZ5b{PU-Dj;1&0fB9*nBb8?dEv~Ukj{%Ysm!*n-N?UdY!gCIH#c2;F$t1S$d~IIL{+@aEjZ3gls^(zqyz1b^W;}j-7b$d_ zkF>v`AHI8J81Ve3zf&Ecnmr-D79^}W)j-rSL<*OoGZLQ4`z&&qE zIyOF01ZxJ;_|j3(#*9@?J=Hb7{s3n`w6;>4!1ezYHm?;XXNAjZoB5L{H11kBUQNMJw(hYkJR_do!)gXz|4JzX8>crT!c6GQjfx z4k&h17Wr_MHV3K*?MK0odx|;sC2-b~Mqw{rvVO*AB%%!Hs8I+&BN4f|DdqnaeUkd# zEIZGH6uzlu?yy^%LxUw@_#^A%f_wTpd&vl_FUiMR6oUUJ-+XXkiNc>u*#_2BhSOs< z?`j2w#jx^9XVZ~IOg1Ytkvx2+-Z8UF{-o$hVr9M4olJU46w$zfBqcX`ARgim*aDSq zHA(FFi4{;A-Lwa+?qkPp*jC*+I8d|Sd9^r!iDzFY1ZgmZP?p?bQN^7PhEc!sIup^~ zgy{0Q;@fwgd@A#xDUa{SdtYUB{!pDyBQOEp`PnM7*}R}{rgivnVtbRO}TS|B1y}YzR zeHA-uc6f|u{IAU&bWa-L!M{C6iBZa54`SKi9LJL`kX5M zIyUY?+|;cvr|Mz)BO|lxRgreO2Kz}T5yfBCRKE!cgp^X971#m;8G)@!uHFLJ(?j2L z?zD^iI^Y&{L%G&L@&~T-uB6n?u#z*Ey&VGA!q{xq`t)CHhrgB3f20EhF2+7Y#Ib~` zvv=K4Co)K7QQA)!@2&WA*nyUMSuCxC4Mvms)k`|IA8ye8I2#DT0$jTaRApBEFXuXn zsp=Cn>YVAh zl|L>nI^e!FcoKaZ({%)!SDszM^Gug|;WWG}uRy$qB#}-5Ow`1Lv*bx97^e0?3>8&~ zUy7<=emQ*Kers^43%osxU6!|p#G(JrvRzmpP+g>=tDoUqJVZsK0|pJ59+=Z>i)$4P zRTZCm+|`_eS9DvA)oUrAH*PGaV5BRsr`nZQ1c6;-v0zZs1qsA2*bP`puI<(D<>UhP z&f0_-uATj6mJp>B$YJ|R|Dojlnr}LKBn^Yb^3SG4HLn#J-3nRN5!Uf2IIgi>>&tz8 zZN-<(+0)zJ+yS=gOno%Julh9o+0<;;*`Kfc#E>x8Ba@xN>QPKvkwFHO7#>8zu7OvM*k=9?L`WjBiK z0q8`00Y-#A`#1j}(36xg!2)Y&Ntr!9&E&)HIoixCu}q^iLA&+0aWkiI4nil)b|y23n!FxgPa@r6_4?RvS((GF~_1j*U!%jB7qWSo(asv{rDJ%J{f+ zit4_-rT)GNyQmbmy$LEq4CMyB>(lv`IQ^6|MlOQO=NT+fy}q0p8b-U(xlawd64>eL z+j7G_sYXm4;H|kwe*cj0(beo2jh-%{`&_}l>z5rri8{9%9%vL8?fWkAE$SO6n*YN> zA7+pL!i{c^@c*a_=qvaM|NWYRy74=YWM0D!NeQ*d&gv5^XhC~(!+hVT zxMa*Kd&@?gvfuzWS2usk?K?3M)J{og{UGyoN1w^O(ZZW{VFXU~_cm43w)Uanmf6&KgOO;kE9`=*?qheCg^b z*Wc;7phFWaUlh>MGP(QcAP$U-lRmiX%-QL@?kOx1J{nlgovE1~EOG&W38A1Q8;xWf zc5cs0QbAzs$&e}_Idg8Yi37k*KfhtzZj|*v3mtqc?LPn6X6!jAqt}p(Dn*C6 z{YlwDPPW)y>-}}@s|WbP>ESJ_d-H%Q0IPsDMJD>{)Dluv23b~PrD3hh{;kX5K=>FU z*n#Iu`3-J$#SVW6?rj;$>UB?{)zZvgz}hp?Ki_z}Urv^{hi_M6`Q`58);QSNZ9t~# 
zX0NH%131cSg`Ny}zxok3D9U1Ae}#f$JR0Jr`H+JTC63`y3s|Ur0+NEyq&2rGAoZ_Ij%5 z6MyFaa{(|jn`rRit+kSEN= za>H0hWqS+x%R;uO3on@zxg=5%6k-ZQcv@2_x|Cn%FR$&EF|k6*tfID?Ut^$KM#8Xb z?-u^)JqWf+M+5XvUTVJuIl?U9S-~4kf;zdmlBoP;wm#)yF%mJeX{kG?3#hO>z(`Hb zL6IDW4h!~zu?+51V=+8i)_9P7=y{}mbH7~OAplvwx{Cw({j>LHED@ zPeA-O^SDUf_o2IG+)C?Tlr;>dyYZR%zVj}0C`is4QC!@|qIX>r@%g83s>zM{dKDDm z96*WFBl|RgCmc{HCqL>9J+5TP;6R^12HOX>W1ms7=%#053|jv(?gccF>z17jB}!U9 z#{JDeF@m>}pOQ>J3G?@#&KW6KCKT9p9x4JFRQ`*{bn@#cr>7XX#j!YT;b*^@l>SS} zrx9$3U7w0A&~5wom`!e%<`Yq7p}wQ8Wz$z8w?B+7?OWsA3C$@YyL)3t`Xsi1d(Jjt z4Uf6uLpXJQwI|=Wbri+MohK*c51rFq!a|a~#%}qYSSyEB6_Dp@ou)kdVkHhoh=_dL zBYHTosua~v&3XgsK^+%u)#N8MAGKo6pnE-f_bel6@QX4OJ9GTS3^0G6u|a~6a;Np@ zhMz7=y;{6?f$6skN#t+6Jm2Fhf%x39ykY&L$^jJxYce@&aaFXXDWB|0-Uz;@hkwd% zTfJwWxY#kSTDX(l5LOJIkh7iiT&*L0+5DnLu`}FqM3p0QMj`(&rI!S&uA3Fj^>XEKyjTp?P?==1f%fkl8fm1p z(aEBlWhKuSA_jVn`*B<|iq2q1M8+K~%j>%9DSDoR!&m%wQo#JiUUr+E=hiFgYDyaj zF$r@Ej~(x+a*tgZDAIyTyK??|ivrECWFpklLiMUYLknQ2=X~OIHHc-D{OUW&!ZLL# zZgjlmr+-{~MKi1t_L>Y}AJ6A!>!T1cww%N{s@A_XWa_F790c&bJ}f!d0ULs%f6{`_ z)&AOCIA7Oq*cw{QesqaT1qI$k^_PJDDV{(Wu8~t8@zc27o`G1AL9$58|&)g zZE`!w^vFphkTn-&h3*X_c{M*9=6mx|LpQvI@SYhS7>hJ1qa!R+N0a5{NU9qv-NuX< zj>cQR21ek%EFI!8oSEO%87cJK?hfr>8r^xAR5c=EDn-x+10Y!KyQYE9GKBF)m0)=T zE-R!;TMPrTu4r~rvn56I^L9I^Xg0cHl2b|@y4-C`)*p+yk;4chIY6cAyfWr*ivNMh zzAv<5_Wu)-S>_n6;A-2o4uASBN$c|*DQ^~+X-egs?Z@eHtSfZTXIJTlV06LUT>vyQ zZZ?av+xW!VX!vsD@~z--VfHR#G&xvQtiJFlePP)nyvZPj6 zqP{}M2+J;aI=qgqmfhK5c2;jP?T^xEA|~Ond&}3ivyA@=4NH{)8A;Sq<0C}{=x=)8 z@U~k+g9Rr{A5yRpJ6?yhwjxV|8#&7EKKEh?Kwg~l*RNyA=7k=NY^q zRKL(2%yUY%wsK1U(j6sfi@iW9um@KMuP@bC{~PMcru2EzP!AiU7Xgc+E3`Cd9tFkR zCk4qC&pY0Dxu0m41R3A~XD&pkM}`t-Bct-TpZ7QBY32+QzfoM9m2Lwu(!7-K(-KVy;`PHt`Zgxg9D1+uLy zd!U&Rb4EZ-+_6|{d(QXUvcxOx#CNfo@^TvTK?)o3?*$_)L1p(AX%&_(eqI!A^Nx1_ zzht-XvLK1(2_aRE-^?rNXh;JxoXFfMV$+#1(d+ilfnI_YBE9!Pd0T{(70$l(r&I%*89FS%4)HSL=5d!qd!lq{>%0y)k^oX zy`z43I$F9_PHP-LfHP}wqFr%76WCNcb8KHyLCPkp+n3j^Y!Lsn#yY22zxmXB>9n-7 znMp^pHCZoZR5nvDUjcm6bR&`Ai4Vii=IfXhtOfPhc{!EucZwq@vWTorcgS0CcUf5j z)ehTy+CdwV!=(Fcn+^;w_)9tPfTTln&A62}0WSdb8@BZ4$j5mu+WnZkXypCm_y6xSboRg_B*<&@v6j>=LWjw!A z!YnN+&PIi^k?~AJbB6FZkxnt2WC1kcn?ohFrG<TU)Ijt&vE7_x8Y~y8(Y|A-hh{+ zcg864`ms7zW~J$wE03V+)a$6eKh81UQ}U(@!rO(tZw~%}&T#}HF_Iut2sQ$|(3~Um zh~Jpb{axXRTr-ONT+0WjJ+JEG#wU-e4s!e6b9|q+>4JCu{hHIGnlp(xD^CNkhnFX}yk2pWCot$A!i9AAMnpj#x84m-^N$h&om_>*xv=kpA31lK ztLeJ!%Lih&0jjexiD|WO<|=60CIw*R24?O_y+l~@llw&eqyQE71JNs(7Az6pO#ywO z!DnHNrKQKOwG?Zib107*pRs^wuL;B33Ts0`X@2^M5sxop&)D13d!XG#3Soka{eS9Y z2qSF+3!ar56(82D3X+y}ww2kC*=JL;8opJ1aT2dLJZKY-(9%@4-gNgk3Pepx4PFs4 zKXgGeSA9;CNb)>D7smU28!@E-p>^^K57J3wY_f0_enRKvopWrl_=5C9UB~KI9c;nt zL*H%CMNdEm4odYCP?z{{nSpkL?agFn**?FdjQKH9;yb+3z!_Zyn+D+Ao}bfJvn!l4 zvtQ6PpPatAZK%eRyO3{>Sn0esUk7CMh_W-7a)(pO__rn7jssUTka|+<%9tjE+Z<;x+Z$^L1;8~PK6Rq*R$bGV1Q&j`38lQ%E|XRL z$nZ0?stxUc;4|2LZ8y_B9e+}@MV~G(arvvb=t-0D6yYoU+AYhDDnNZ_HmDjVo(M6F z_}f6RjN8!>rkt;>fiw4cP=$fZ0Gs>YrBCBmnXJaB&_ZSGMhE_S9?DcVMC04Er<}iA zv%$3nFCmgXOk~ZY`{ZhN+!J3>#%(qM)-iD!4gV?Pb+*b#u4Bn}n7NR&_ zmelj65e?L`@+o~2=x4dPa6#nazN50oqo!hmE|8-J0)Jb&x#vi2>9QVXgHHP>-WB;&TQ1S7?>K<5M>_HQ!oF zXa!w=(*gd2<0;i&;Wc+Bq>IN=`V5-)6HFC%A@6g{m$y63C#dT-yv|@`x(&{*`)6l%4 zhf*_9wBK{74jQWubvTbi=|IRc@SXc02~VH`-mB>bvITwFHhc#;X^Zgh)Co3j`|Ft= z*og{^V@hb%s}*$ckjgv5BAuB&aIq9`y)9^b?L2~QZPl?Li@=d3d|6Ih@W2#OGMu=a zR7nLKpTGeMyQ1XHC%+qt3bdhhukBlnB`o5n37H=n7!4OFrs_p(QUfMM^U?M-zec9}KvG&@gg?W1q=h zAnBXR_x#+J6Z6lv8I)=ByRF(*S<$cZ;nSS51l9ZmY}L`N?#dZmulVs`8k)cu=47x` z!&2U~yHdlkEKJ9h%M;W!(g5*(t24}<*}9A0?+z2*?SK8?0?1CSyI~e;^=G`!O4wvc z-*@zoKOV^Wjk3D9AAFQhy|U-{n;=2CgT()*5f^VjCjIyblZ677u&0EP7)-3W-^0h5 
z6I~9r1@eyMFN|+hjG?*uIz!U;?oNlKUtl`l;{SAJH(`-(Dk)*Fb!S1G0?#jZwBHvI zvyOE74o$?K7H|7=jP@Ai41xJSw>rs#hSmH=7u{osX*jk}13cZ4r!ocNaXZE9F7qY; zHLN#t^5inOXhZn-tSVC;ev|!39(n5f&9u&veG&+}}0-Bp4-~cP(wM zQjW_(DvDGzIH4f^D2n~yTvZl(ZT13#l|K7*M0)*#p7Tq29U4cw85C>nH;inH4lh&KdV8kl%EjHnW36r70DQs7D%5QD#ItSuX8> zPH1?&O*<)K=a^Za9p&)S*y3(gmsrsB0j<$}c;VUMebBKIhqW9$);K!o&@jF&nQ_L%ZB! zZNa%d(A-~+a9L6u#M5JSQ$BR;um5b-|u>{x<7}6Fa-)_|YGHJSEkvATvY7Nr37 zWv_@5poX9xYnIpb@rKMsZl@~U%5CgyoHlGFPCfG0%5@g3eHysqq#;Se+Mk~(e%^ao z(mz?bUdyW`BoQ$NMc+R6^I}^)OOaI~oRWYI%?etcQ;E2M_o>Cky@X_3)<>aPMt|?8 zR9h>HYl_B{cG{(6Gllf#=0Clgt{Z^1kmqIXwr&`ZHAFliyMIk`1rrdDtT!weE-^Yf zQt~2~90;@ANGmV(%^JCba2p$o840scFa_o1@rCB{HKdueRrZqRyWB|f#Hc7|SUD@_UsXwCNA*UOoTE|drOmV&a|F&N zDtXO`#ePW*PtS^;T+gI%Cv&o5HnK=}WzIiEhmgL|+NIZfRHPpVzPI2(RQZ20fAkmr zAj2;pE;ySmdi=5dtaa!2i6`9~8K#d%B7(r;eCqYq>W)&WjKj~hG8wh~bp; zgv{q5iLHNGzEhm3EEzR-du);#p3BS^)wg%n_toHpJHpbfWTSCGv+k57g1ggu|ERO1 zXR!8#Cer@rLHrA0XO^jyIy2~%aT(Z;Dw66a$%AIi9CjjNvKom-FmLr($!(DlM$MTC zVcjE2&olOV+5dWx9}UZzvwDb}Q=gc!Z7 z>4|KXBr?yl(haDKO2FHntl!?Ff^GrpywZ6UI%)8nG6InQ5^CW6wGsyXucky4(P$9j zZ~L>pTX9$Vvl*f-1?{iUVXTFw8mF?GAm!*NjXp3RJ6K=S32dexcW5i4L(T%16$%B` zwx4-YpoV7$i$wCoKjHENeMjV$ueThw%_$QphA><_!j%LaFP{6S7i1P3x7xn*Jt}~prBD89t*Eev@z~CziXJaIRaNSdslAzw zYZNKxlT3msYtF|4U8Q3Vc*?YkhzZvc!`D!hQ!DQfM@(Yd4 zCp69H49iCyn;kCRrJL%l7EKaB6jEM&I+9S46;(x_(J>-B>90$dBZ?rI+QZ9QGNMuI zLTD5D<=XMlEH~vj+nffC-X8vqFRYMW)OVBRN)dJV$%go9XZho{W?ajKkyvh08dl^-+G&?iMI*=gk4RL8_4&>xZtASjxC~yKO5M z=J!Wb!vxK%eqwPDQ4gnt1fJyYp71mlovI=qL<*E|nlwOWl?vS~cSnR0p6kL*A2`Ol zG2?5d04oAG`|ZX<#pRd4-uM}8GJ5!dtk}ih#7TY2uVvI(ouiWpo7i&%@ zb^T`Fo-uD5;_C*>T-X32elt)#RRkiKg{Iz`XNjQMOKiBU8pCZ+H^_q4@GL=sKwCw` zF)%bfY|!DkizV)2Q!dq+;l?wbJ=%OmFhAk<;p~QcU^aszi7`Wz!5T9EB>^xE|C(QB z>jKUqDyBxvUB|}gXL`I01tV{Ca`dO{+eswo%+N`KU=LesyH1GV6bNQ6C*^L`7jad| z(}-rK&yJ8zfrQLX90-&rPE8-l;e(fT^cv#Min2?S^JOsVuVJ_0{WBiAITzew7l9Hu zlMwb=>1YLqgs?!556=4Sk<;lSIsZq@d&_{i)KD|_%_0G%v$*>~uV}H!aq7+K%Zgo$D+7&u`TcK#<~38{)zXqsZ#K8Pofst zWIj+?K6iFxW{XlPY+jw|@f=kfmY@@QJXBh|zFVm7hePV7#rp+@<&VK!tGM4=W`wmf z#T)Z3DY>~&&Q^r>WdFqCt%9Mx{WNFGkp;2dz$_$S&+fP^tx{!uvzb3bhK^LIRAo2T z_H}aK$})zXwx-+tudmiBspyaL5s>(9vq2}|d7I%KaX~>P;E$d8qaF3GPOj2~ohaE> znUgsqzVo(6+*jsn^z{41yCXroUg6E#ny}d>D62`~?5`il=&r@}(Q9n+Yt7f*Q!9GY z1J}RA)Iu+57iiy|^1hbw zE$i9&YWp3R+w7ORwN%);X194%;&>8mM1ts=DBQ-9veE1o9icar!=gz#Ji@3dlv->Y z2{m}wARMELKfX(iKU$Ul8?2%*sEF9#CAQ@U z%D%y!K~of0%MtPjCr%xS+kl2V!QZrvbOTX+}x+j~G{N1X{oK;U@?RGrQV{ti#{UC<}*?ZTzzt*j{G!UC!!hbvdQ zuS>9C27X@U{pu7}&3@1clYI0g<5RR%TZ^hd>rr9NV6jic8 z6cj&LM*YdSnbjNjihW9awAJvGTLwO8 zd5FF#2Y&kpXOK(;yA2}Ux3*iiVnFMy;hJlH`wgvg=&q-tOF$V|)Mh=W;oMUU!0$`* zq&EDK=%+i`Ek3p2Gv&+o@){O=@&~@)A)*rB&TiVpYQRRZ-XkR?zWF1I@zfc+(PCp7eWzI5=|m%4walrBvkYjzuw^4Nx=PY zs?*&$r=|OQ?)k?@22m2no_>zuIf8zg~1Ucqej>&fprj|B_0#Kq1QKoJvs6)$1=|)9n%FcJgXFYA(Zi zNCg#_>5E5Ya(pXS<+*@p;EDt>5r}+ctpPfpi6VM%pd}RHsFsofK+CpMw?4Agh|X5V zhMDv=kN78w1rfwJC2J*RAchk6L2S%9C2V@;(}oo*Is3VjP-3YG7DVI&hX^qRD`X`> z_EEeDu@jb^gUAGkJySoAfJORWZ3m zhL}xA4!7|XIS3053=YR;2cYS?kR*}q=A71ORzMXn>{4&G#JHiuP;GV~@_giP<*uI= zF|#8TP&v)&PTx$a&trR!zarrfK^x0yM?iBMvsY2Su@_~IOysuYp*BN@<180(n|^=P zJ-_C@u3FABr>uY9QH)9Y7pDVrT1x=_&EygW46&eZAXn8yR21!(B6}9R%-f_;+LS6) zkN2wBRR+_~)%G~Toab77Fgfsf#i)QMpWZk;m8W#?td^rj5538vUXZZJ8KUE#S}i_qi)A!~e&KF4ErH_wt7k6CLs16I%N zyf!5!WKCjT6~-XLE2!XzO$Pb|1r|Zqwq2|2E>vifhPyHOsWn5scOvW~thow}o>Q+qe-^*fjHK z?M-go)j4z;ucBu4{JkE=tN?dQu2`NM-<9xHte4M}h+M2N{0OSV5!wCO=bKy8s zX9}Y6hoNi!6oU=A!MefvCysixTW_ciy<4sQ_m>C>7N~P5&OA26&HR>64URfL-Rh-P zj&feYQa!7?uFgd396Q)X#mBFhUie=zLi;t*^Dk;0VMUclf;aMATLZjg7Ptry*?UC`E>y9+&7vB3E 
z54Wc|1`22*b1W?Ug2_Ir#P&z`HQ@3>zm(A*mV8u1%Ti={78`52M%(r{1m2$ueKIpB>YqoUU`It3}vzWdsKg%)*A$-NlBtPeUZEt7VWY6yh#>|sTwQ`(@5P9NPMubSb53?Fs+`Re1?3heqT4p>k-!OxnQ|5v{#~#h&C0f7- zg3>*!4T70BQ+Q(v?a}{T&Dc;n5R5E05nt0f4RXplv9$QheEY1x2dm!2+DZ z#2Pn)s5X7k_!B@RzrR@{B%JCg;S{}nM^5ni)ihI(T? zU->ApcD!6r($K8c?li0HxcWSc?IF^nM~3mlOJ=MKt!?zSoBu$v0=dJ$9+g6_^FOvV z$NmC?^?WD17+iGmvwwg3%8{2$iJ82r*d1Ko)&i3>Do$H*x z_uAc=M>BKJJ@deao_vLli#ho#OlqZiS6IIiq8cWwj*eFnF${7IbyT4oU9v4&QHsG* z6HJ*3>)?fzQ|2P&-48Y0^vYUJ`I|TDIxo=!nVG@U5 zH2to$+Xe7bM*rl@iy+R3@$YvAG z;5;COCLzE4!2N+|fa*|HWa)j>UK0IL)y{l1tzO2`$EuwkDC0nn@6$WJEU+NK74QH7 zq^;)(=1M#sNgkYdQY=vw{&WFQnxO!o0e@+yqRhZ(`&*rOZac62s02 zM!g82h=W@()dI$#<~tL9=EZ%!m2;?v)A@fZpP8r8d?)BV#}cPb53f7J+ej*E*7-z~ z0C8-@^rA&7%#8(6KULf%u_wxz{IyZY zx*%oDaLO>zfj(rw&OHD8>uDF988mv0ByS^rjWm)|0OlU}5;ovz`iWB%P&k|_%S+jEc$M}aNSjLW& z_V4XIz;1APYE5UwnPzZsoz}fc_JqKDd7y%GV}Ypc+rj2PVeH>T>OuO7h*GrZdn1GN z(FPL1^C zJ;A%757RdoR~b_&w$z|+L=hjTYLKCmeAWoj`kM>YwtBwRX}e=|sXniVgx9Ipn#@(f zVNVL=gp};zRj?H(m}mHfdo`bg|F|hzOa*FGO@xs8A!!^MNc^?z1?NJb&45)Qi^1bd zP_asWY@ZOG9*6>Kq=eZ8sj3{-@w^W4uP_5|9!2R(*U`|8ir*`_V?M+Poppht-@Ipv9HW+r2U6sSXJa(>McT!F@PH_fk;|4;C%U7g=C};SAgR2x} zv5zcy)4tkb(&p)Tgm{loq1!nkt3sT^vM#D?^$4$IkJqosPNoFCvZ+0c6n|6;$zJK6=f2&kJKnm36R;wN?xf+f|hXby4Jgryvyt$p=>z;BaeCZe; zA4;1w*F}b&lW3q|O6GdFwV)U8*l`|RPRZX$%Q!O5W(oKeqPp!cd@?v}{@NLTGE`BZ zG|x!xPDI_nf~_925m;i8Jp><#L3aaFBu|5rlGJLa#3pt(Rh5zdMbQLXXf_v8Nwi(2 zixfXlKK5H!$#~JBdf{B)oE6#hHwV4e05%5K>!Ipq7LNEYKTb+=W{WPXj-#TU`6J!; z%5S`7&6u?{>#d*&*FFwhM>N+p#du??GM6W3!Ng-$KU5S0lL8d;KtGhwVM$oSAI>b6 z>k~-;Z9HgcUXpCL`wc78zWK{<{4Yfux6n%pAcP9s%f93EQYiA)Oqw&Hw<=(Z%#wEM>{QuA z=sscL^4^nEHSx@13O3>GEPfRI@{Asy2{o|(Dvv!=(lp9vQxzw@Ht3kd1f<}2f^d{i zyI)`7P>OVMhnCthU&}DB->O~2Cl&_B)bFn}^D7NX`Hv$6FWg!gs7CX*2JSsvXXp5< zEaj7OxdJh^PDEx{V|J4n?QhTV7@*>HiEEVYx!*Pw-+nDs_I=r$pJjA5B+y=K{-TJa z$kveL7f>eDr^|X)VHgO3ZLE`K%zbH5U zR~-H~cyc~hT+8qeo1epZzq}ZNj@$Nak@zT4T_U2Z5IM2tl?MoyWLB0$OwO)uZT#jF zEw9IieX0*I?FJp3^t?DjWf9mjz7K|>a~Y{|efLROzr!;fs!I3p*C1{h;~Y(oHJR1w z(XXD((4qjSNB^c6$i+t2_^rbzO|-0Bd;8P30+q)DX55=NtmV{MJPcxPan3j2;z*IR z6aE6lH=BiDLXns*Jn`Jrb(1ZJcsNhk(H4g;MZ6VPw)H?rD>2=|x97@dZ#R=j-+fe!h=S*R=k}L{Dx~W!Efk zwYLymgf$nNTDWONmwl(3c=T$EiB%}%C6pj*_chH_F#-F+JlVa09@B&+G}ezrYbgD& zG$$+bG83rwqdRZTx1`v3|CbHclVqAtU$lgku(s9h0`dyD#-~}RF70P$Ui$e9)RMTD z3=m(tvZbe(lNl*@$-*|70Pq{G_2{heWRW3{^8{KI-=0FiZ?-O}smxyPW5-l{F3Ly9 zmKD=5iVbQ;SB;3F)K}uub8q~|4J~y{!vX~FL?{bZwKqRqX5+4>AN=a8I%OmiW83y) zTlp~g)fjli)q}l!9k=CNl?KV1p|q*E%&t23Yy4ZFl_*m&B3Xy(9n0i^^{iFzX7hkV zBEFjfG1!cBVrJ*`XvrQJ@jD(u?I9SuGm!DkmXFz40-t;D+Rx%u4{ddb+s_68D8C)) zy<>Nr z_Q53#85`hjMA;(b!SzL*B}++3F+sH*Hv2omu()&cvFW@q^Vq&xRL}u*YHq<#wiP^R_ZDmz?+0Nt_PX?lpf#&Hgi!4Ezh^R7<1Y;cY4p&5yEUDulY*yaX zaNu^<(`T}K_dUWUCjWaWld?~ln@H}i$&wr@jOArprH&w2ck<&zo+l>zSoEI-|2Cci zL(}GW(T~Px+XC9~DotE%P}%kLe3H?NV+S)USTvs2@pGQI&EIN45LbG*#Z*Wlpi zlhI}60L5t!|L<56v`s|P-0c3Bp#6rkvnIfEE-0*RqD&w98AaMJemT4Wt+v1CEuQ;- z2`Z~*naT>niiS_ZLUU9(rHwFbEf-VwTp9L;yG2$U>nO^Q+-;ff_UG+A`Q^0&Z!OBx zsFISx=O6OEVHmuIhec@QA80`!i!lY<4#`eR8aB`C=qpj{ps2VpAA}v6$`9w zNvF|cWsGMgKH4aB1a5zEGo+>>E|sHhOxF{}dDj@SQk*KSR*o%rumHE8v)MkvHtYw|?X@xU{ zWx3IMx}*Kp_5;29r69%Eht}9JGuMtVp=dvBD>}@hBTPZpv#ON&vCC$KZhw8TrShyN zRd4lzRq=hQ^B)O7OA#g|C1HMZ=5;35W!K3VP}R8W;CfHp$xr%davSTH zJcZzYlrnQFn4qK;nQ!3^H3ktw?Bjb-xR zk~8+TMUB7h@M87W@nMzy;fwlCXooV^zkf)Bw0;yVkPE(=&_0_Gg!Hn03t^$?7##OR zoJ(jYOKe%Jye=5?Zdu0c^W@5zF=jz;jWJz&Nyw*-5w(vJSvuXJ^+U5Bq5{}C+)#h9 zWOTeRUD)`vI>I&nhasPKvXsZ%f*ELTpJ1n$|z$cj8TsN&kzBxZa=4wdnIm#py!;sT3wNKeMKB z#;qV@B8J!ec3@q2`)thFb0$f`Li_akz}@en6p 
zoj{o`8&w4VGJ_k~KRPZbE}zwz6Fsk$$J>YsMT=t{57Zsyp`O_46rUQ{mU}!XAGX=)&&+jfMxVo`)cMf)w=7@&<+ZZ#A0=xsiFSgEdAn2LUvIrD+WX*G zFE#bsj4__gDh0$Gj7(knm8g?iiQ+nfvkvCIEIn$MAxoq`@!t9BrvPNk8kDh?`IUOR ziZ;f;l=n9m?)@OBF3H^dS2_jcnKi{z?#DG&BW7=<%gz z_SL9hZj6g$1^~M@y0S=tn!JueNt)I(6j{*6tm)Sgc=~j42NMH1HZ|%aJ~zGzDUQaa zs{w)Hcqe(}TuirGU%|DJ$N9+Ziu(+iBo!YmVmr1OHyzHNnON4ghqVNmDp9Yx*wKS? z*R_+|Hgdm)Jgx5s8O_|X-%8m#k{?}|U|a~^@gPF=aTPWr{1tWMLd#Pr%N1-Q8T+|W z$cU;nE6{fLuU2Lq=@lJYj%Dk|>`hwghBXMCSMws(=>G{XQ<;Bu(0q{RHG zSbiQa>GoD;_N^NWHL`HrLaVJ_IU#!l*o%&P1y4`9nENtIJcxQdW2-VLeu}tYa|U1K zEij4IUZ}N{T$JGy0!!hpszC)gnLJWt^u5}w#^6H@FLW?nV4ml9Db*kY_QW5PhEhMwx;5IP8Co}C$15obaN}v*$s*Th?k8tFKEH0tL#?8CAYr&g;Q9+# zWs8`XW)m7<3`~C4FEbUn-Z8$jkQW-#FxaWoSt;Xot|s4Q6y=OFA-5E&whJtGttT*O z2eHt%$4%(HYsZUEe3Y>g{}QA6D4>uF!I`B#D!{)c`Y8Qbc+n4zHL4eS7H zk#bj7W>+AUI-;A;#8s;Qsfehkm=o3Od~<&iJDJiWm>LfS=Pvp51d`dy$}s;}o*dec`q~|F#?Zf) zjeMI3=ck7(xs*i7Yiv>eZv8-EWo~HL^t*0H5iY#+U^NN%))9uZ4_n97>E85XeCa6L z%>p(eYsF?;WqeBuXxf;HaZO8Y!k)S;Ia71S&g_VWuoZ<&zY#in?2#slmwU9XP1Me$OxSCPQe=*?|Ut8rlnj@X}vimlOO<6#g0Dxid#ViSrsx-ms))Wl~Esh z?T=KR%P0_ZTTD7T4Q&&Q9Y&`TWYw9oTMgD2K9_nk2<{6?A z)m?Jhy|1yNL)wX>R&|C|D}Vp;D3pPX(MKV$@3^^22f2dQ48EBIS|w%((xGM?+AvZb79%+I1b3-Ys{FOtxTibO6L8(cyGi?6K{X33WcD0C<Ntw1UoqmG z&60co@qB-_OFw6(SG{CdVDGA)yWz$W^pSu2D%hEQy@{}+XH!n!lXlU!u`38>!vBP2 zii(Ebq??4}`hFcNwZ^W6M)69G%=ym4^SI`#nTM0{An&#h|FaxUBh(kj4JWGyfpchm}=Sp9p+ zaVB1TJzI^H9{_%<>n_x1AvbGb9o6mnQvp3*GIm!+{0~)XS8QbT3lR;nLie0}uIlhXcd~z0|4fz}bMa*ZB3`?HMhuOA+EgK=N`cRE+xU!RX# zj@t{wxl$P(eE3MX)o1t?`@`ht_$O{TyvWg@63_$k<3LDTvVmT!cPQp}HiXmO>RKlm zn#CsYF{T{){poLB< zY>M|Yc=e-CP5XjR^qMkCF|`iTtV~=KV3k)vs%u%>bcRq`F3CCfy2%k1`Q_~q{xitP1TvpK{TLSUKq zbiEl-SN8U+30DBy>;%Kwx+t(YxGK=ZmaiVL5nXY3iJzj`XsRfoGjaNbA3v3(%&=p*7eAVrsn+CRFu|c2A0lfGxcJ5U+SO}Rn#VwXL!|{nb2d%2eIz`v#a;k-bf}RI| zgQ)P^g`s=}*}`r7DuX5!XRE26=r^g62w2beb41}Gm@K?~3;EZJthbfcdSK5oz?UfP zlq=mz&LLY*PLZhY*JFIKnyxFiwrvljcPsH4{HYws)Z zeoVMiZrAamk0j(V!?15~-^yv8GB0-l%xuUw-xKE&Qd)^&zyao;aQ7pI2Gw1ByK1jw zeH)`KmTM{+s=fT07|r0av%`M6k>PePdCwlr!lU0Hit(I$3DBB(11NWjrAgmeXaVuN zooz`02pifYOTR=M97GdSCnsW3oiXP3;$r zuhZ^5!`;dCeN?^8YE0~!=EMZUU-#Tgp9dLCnkwwF#@9Zcf*~ORh0W?$MaF3vK_(110SSZtDaTQkc*da#Umz zE?}iX5ZeZhauy^>rPbSHK*U}u*U_Zno-wvT*mcKgOB1V@i%R=an&MA5c&G6M?c(70PK3$Q zDx=8fJmrfo@$u#Qbp45Moq(3KN27Z%^^2smyqyZ%S25!Un;t*@k(k}9(g|xEi5wV3 zky1jJmQPC@^IQ^c15KJPyD^pJt}8x2siW!3=E;OUJJmKAl0^D@dru+w+?bZ`G0zX5-Cg&2qvZ!+rGFpJLLE44xfqr^hd>%jK5C11GwEdq)$aM zw8o$0e#U?D_Dr+#s-<4dI4XR%_r(%ub5Hyc@UDJXfp1;cJu;)Ws-n*ZgPJ(5m*y^C zj=#`=v%p!#tM*&dj>^0?@E>wZ_wvqGo))ZqE)F!z%*n2v(RU#k_@43#%dyb{Q0Z~* zF7Le`sf*!7{JfhmV!?R!IA%=fUKMstmdR&Y%y23_K8MMARTO|IabCABh_4_1ol&1V zYO?%PF|u}U>{1Et-R}hX{Bi@#yK0OkkFmVo{9UA6>ozbt!RF2aTEV%#ZkINy*4^VuASxT%!lxP0AJJ5P zXDe+=VEdV;Xi;f6(z)2T^cU`PY*vXX^jJ(6nV3BeyRnhl&pBIvz`g=4o+~?PB1=Tw$WMUPrevB;K=XcQDb6Mb};1NYr7_0psm#W5#7LQVIjrvU3fL8(gynj zNlAgUbPI0S*E-<}@0`l6JL(X5k9I}y18VISKq5?aZ5uEj&tw5KadrRN;yu1_;UZ=S zZJu~^nI2a{%{kf}AkM#HnmcW@JwPHBb%MElRbmCB{j=3E7$ntEiQ7I8m4S{>KVOYF ze_|pcWGPWnMw*v^>Bt7*oHF`1ty{rQB8gIp$U2NP+e?3?6ilQrQWh=Rz_-(lu$k0N zCYXQGzEvu`pC9lpbE{sQju%o7u`_TKv6zFJQ+crr?V5m^tKMqIlg)msTXQ5jz0EQ# ztt^rdj~igxm+z>g`k>sfQ;y&ze#cyW7!>X9PfbSV$kK!uVaYB7WxupBVU}C-QTsN6 z{PijwxE4wc62IMtRYdyHG6xYwSoSkHRQu%x7I`cIhQqIl--2$?>|fwJDn3+BtYpsI zdUb%AU!0W(6ce%|!(@uKPZ?TbVE?Fjfx2zS-mulAN8{6!7gqeIU=iw9`0L54;v(k9+D@Uq`g;q4GXDmqm+>U2 zlC+vJp=N^J+RDjW>8&v5@%fnEGyNx>Jt&W2M2ZrmH&vkT7f(&KYntovE8$9RLk?no zBFZaNq{0UErK6&?4S|QEg=aMpsQS(;>5oFh%Og8wKSA%)h*W4fBRf?yGJ=vp$m~!6 z+=G96A0yhaxd<~syV-9ISvo=nYgIuZqX8}qwxuGV%CzPCjR_Y_<58jXc{lQZO7dmP zd_`n?)SJVgVZpH9tNo&P=kJ7-pJ*$r%rhqPH8hyHZMElm)+NYy#B^WFbJIf6jT;ZwsUD5@R 
[base85-encoded binary data omitted: remainder of the preceding image blob]

diff --git a/docs/_site/site/user_guide/plotting/plot_decision_regions_files/plot_decision_regions_21_0.png b/docs/_site/site/user_guide/plotting/plot_decision_regions_files/plot_decision_regions_21_0.png
index f22915c9679c8d827f1b01802edce884b994811b..20d648b5badaa77564063199a8a9af2098a78086 100644
GIT binary patch
literal 38363
zFVyyrI1=u2Dkl9$z!tuPjdDOU5NRWVR(>oqK333_f1qFOh1JP0g_#|lx4zH-(eu$_2#YHQDcoa45ck10P1wTf}%iwJ6T*!oa zGs~4q|I=Yex;n36HCjG??U)I0bP^;Hr}KTehKz5WkSgO^qb2NjVjZOXp9Opx{Dri> z`WJ(>CWeadBm^>^+8^Yw&MU|_);5)CSqdu|5XjY1c2ZPW|9o1waQzitK^PB}+!}mE zmEs`oLj*lm_j%~1_$^wYQU;$u?ySwI);}kg+YZ=aUh`b*UF3fqpCM@)5?-Noc{VCd zEr{GA2b9^*OZt7gn9D-j?Ybn<8t@*RsFI8&duA5R!VMAhHT1{o3;&|Sv7{sbp>&8A z@4Gd*5oCO|s7~?wpcTzDZxmvtWGF@=0pz zHrI=%FoGEIa>}i&Q-!OrPW{)LDUL7LY`?0>b-eYFKaqS5#3{6g2_~tq1|ao^l~;4i z^TLzZt>nec7o=et|2y0x<6qJD@+c`Uz&*!(tzv(f&~J!168SAO}A{=RFpdthzi$X29CfSy3fx}1RH zk%M6hNN-X7$CqceoUc#JN6xERR+@epcP^ebQ^e8nOK-}s%RciFh zs|WJu3KtL6ktmHAiB9eN-}{MB=8xPqmhCsD+G-IqB9^Y>IgafsD7O9&_K)1h&robl z=ot8LC%)R0*4-SNtjwJd>eeJo%D8TANMw^7zk553^hl}7HYLKC`8|&XAWA_StL03s z0E)Nr7UfEmC&hv7{M}8cvsdoY47XPkgcxy+VNAHQr3U6=*Yn`o?rj zU&@ml>?e|f4(0@YZHozm{#8EVeCQFY@S41j6mQ(tyh@PFeEuBMiyN+X`nglA1UF>&z7G#ZtKSZ?w2FpksHZ>6{|5JTtYSa znTzV`fp-G)>gb=L;J%7 zF3X{(!h4D^Q!o*c3-8z0ITR@}PpwlbiL5qi=1DW6cmpg#pGGy%X<*pnFMG7cX7^U{ z7B;+a=o0Cjp^yH3(FjHHl3vJ6OiHe??+!Dqe9AT!OYpp|`&iH@i*r+x{%c1w^#>mq z1xZ-ULXoEvw6gxdq-t@c4h$)~9e&1x20Fny#G9tQ+YH-PeLj9uAFwaUbE9A0m9X@=bshlRMr_f5ijF6m{_q*HygzzCTGCz`+HZJ#{K$R839kt&ZCL+F&Ns>s8Ei71>SS7>j>X z^9gx?KhStba||6lI0&w9HIm3R41R{pci_98sU>QH`~=}4^L${ne69b)NBWJNvj3pg1s zBF1eY_skKmPyT?43tL|FgfHK5k1tGoUSQm9V!cK4d? zFwYjZO0(Op?^GZs%P6fE>KAPHXcp~gycO&4nf7mRj6iY?r7B*gwX%<=B;v|aEcy{c=rs8<4`}7$)}yiT z*mY{4zC~iyInOdDDN=~Ry4;}Ku4njMlGnsj^7u?~+oZXb!X#J^}a>T&4qQ<|C@D~sZZdlE>%b34h8R#Q9TwC63<9WQEPCqh} zB2N1|v$Q?Jl`9t-LJOw(@=imj+yQUALmjbv>yJOitxn1T%2TN9BpBE0@>cFRYr%Vt z9@^(2(^Ya)ZbWV0VG+YG!@l<5dk@aI{-%}RQIXgP>rg)4Ny?n>B#z6P zPO*QB2=mq|!w^#QgR4WiN}d?{i&U1PQ;Q37A%#Oz6$-=HF4m^0J1OVW8*IarWdc0} z6k$v2C5Ix+^76Y+23bVe-Lu`)AMo`=s+=oUVFT{14U2D#7iFA0q5+4Cb%9rgqq)8c zy@Z(@jdgl0+)Vhq?GmWn5f13pf#l1rG8du=M1>6D_cV<3GHe>CS1`T`g3R+<>c@yeKyB))dw7& z(;J)u-`8O~Wz4&Cg%|TrBP!1uu1>r5(f4wkq><`ZGYiGo(Hwewn{76}utl6*cNmGRz$A;GT8=*mI*n*ZfIuFJW zB@^P2tkHn2Ls)N{vf(?BXjnw?yb#UmjJP|faz(w)A5+_<6Z*bh#Do?kkoOL6MrNIbtEjgyF8-(A>AN9a}nj=clt9vf(kaS?w( zoczxz{;+1T*vPWb*o;rW7q~A;0`MySO=#-RVoGVRa z?(606U44q`nHvQqtp~Gw^CcLnl@y4A+>TiaRyn|g=UH1q3-$5G@ zCHm1bWW8-C;ua@m7&uDJE&~yrf*V2K)~S+Ja8-OEBiTPkR|QTWE&76#fOJFG3d|kD zl#h zVbhaQM8<s2dupND2m#sPW z=?aH!Y`71Z%)NKJ|A9W&uQ3_na#(xcuT+yBoZ=G=%dC(a1nLa`uVoivQ-^!lR1!%< zv=LXf&H@jER=!I-M`a%y6z!1^l4YjgeuTv`bf(IS$ghbmH1)*@*GxmhEml4gqjE)E z@?5tCUph&1X5Tti6HZ9V%aJ!m_9;)s9VwDuHyKLdATd+g`>iPbL%ZN)x0 zgpwbE;i2Zm|zuel8q}>6U5MkVq3b!;aJPX|1z9OA|d^#ZTux}VG4;bBL*$k zw7c*+Y@4$V(n}sTsZg`I4P9J`P!4S6jM#3mn~oP*n@P$gFB+YP1*_ zX-1==oj<~3%@i`d+6W5xH17HL~+Za?6y$N(P0cQZsGqYXrch)1J!aZCyd0Yn;Na`6NQDcd&YMOr zAZ2qvCkpu6UFd^Wd>~?YaWXuIe&QI>cYqj?@Wvbf@v$Q=?Z#6DS(bZCX&vvxk1LjJ z^oC<(Z8hP~0(*_4jc4xT1|M8mf~Su}1$|J&L~^D}0J_GIWrnTel_47!(B?SXINpt8 zUXr;Mx*B+SekR2@zAbys0p>5e8i~KqA(rrv!GC@Roa6Z(-Z;{3EBqCbDv~2~m{Sc3 zHTLC{csxI%l$#x;6Pmos@Q|inVK7ymwJaq!?}yp{7m9b)&@AaQ)Ak19cI89^M1pPe zY1NDk@W32*0j0a{>$5`Cjy>}uw8w|FET;C4NtN=uuaK++JM_xv@F>3im9<88f)I8J z?@`cu#xd=2zD*^oZx*p6Q#dvdIjoBoYe_?@v}>XZ`@_)!A+%nI5KbKZy-EgnBD(Dez|3Ov-$pUUE;9sWQ?w zqg10>JX=W{RP0hj(OkVE>xE`ncE6(g+@LfU>^^%iYHv^V^c~Z}&4DPs-f%4o1cP4( zugbW;rbIc@$9w&5&gw6ptD%NY&tkR3w;DU*;%m7LNNwkSx;VEa_~XM^YWojBXP1FN zSE>Pk<9KxPQ?i+s%Q#nwS6Gt-xC;^2R9rwqdtuYX0`$0CA0baey@9Sk_g4t<;o5N9 z1!?o61BI|?gDYWau*-lp&urI79AvZVm*L~?+}#pWESVG4h@U<#aWdtbfE8W8{1#iH z{IC%7_1)x5V~y8_X&5Q#~y|7z^$$dJ25N8LbP-hiZvBEJ{ z!4|ukP}EnjGCuTJv53MG#eP+liIUAb;zOA7aPGtiM zc8_7y^3Df89Ptl+-djZDxwEV+3D0M14yPKtPLWlQrohyG*DlahP!Ut9uc zw)guYBo5>2d7yxSUcg(UopVaz9Us#8e$A^8?1w|Qlh+^sm4?4qMg=UHmwsi(F8k!W zn;Hi}tzr8yCP9SeqYwBjmGB!9YyOXO9H13=!yE{Iev^gCxN$Fi}i4eFi04C7qV)JK&{CkevfYIIhjsbbF 
z+fBUD>JQ#9zn{@mof%uPw{IzCC(SOrmwEh3wh>jFY|x<) zcd=fuw1%T?_4QVgN6Xeq&)#@V?C0FMa4)meDo1KsC5v}&xx6bNg#S1a`UV|BqW2uS zYV2cp`!|IWD=HFht&g{ffJpV54Mf%v>LHrHHXR6TEV}1!?rYY1KQYRQ9hY!)bcvWr zRK?*sC9PF4heZ-Xu4)@oS-gpLW~j4|G<~156T&ab^ZFps#PHWi8b4>-8Xy+^_C(PG zy=~{QncVwSI#OLpFPfL^;B`W-0J6iTiOi=_WTYa{AC`4$85(w(0`Rp;nQ56mmBU8o zkFMWL4-$`45>C^)w5>X9D#L1hlnIV|Y_lndphr1Jz43{1!>fZGhkIup^Uc2K*?E-L zASe7<7Aqt$ZMn89usoXUgD|0tJL1=WBdJ*au(PU?ZSeI&`!fn5hW-YJ423yg4&bms zh|CpE)a9;#y)FWM1mu$wf+6&pIOXS?-5ofB zuhuZv4yA{<-=A+DlcU8>$B#ZB>kS`-Fj}>3wI7oh2F{n-ffRIMjJj>mMW0q%F&hDr z^GRWUza*pjmsz@BLXngL?QyAovrLhC>Bw!ds8D7rs4UR_ahJk}*X7N_wNKYX{-X10 zEUxW|%R69IJMZ%6@;q*?u&XGE-=+s~I0sq7O#)A&KQoF|Ly+&Uj#UkXtr}u3&cI9;e=-c$mg_Rr)0ASzzZAGae{;|_s99ogauE#X7{+_G5*VHQx`F>4 zDo62nm}a0itt|3yi;^v-H-?rhP{J5XiK`WbL+T>WdxckfUCgJ}@4_f}x8wrd?N3%( z3x~G{Ivpei=>!yV<>hd4#63G12GD;3g{M1#?VlYi1HUohY!$q>h#KSK0Da1>!nBk@ ze^MdxZ+{=MaZky4?9lQU)hu)rbU>T8YcsOAeW-U%VAnsC1$@rC)=@Tg!P)%ox9=RQ z1X03#az#(wSNs{C$E6146P1tl-{9{Yay0)sO1b!{_t9TK-+`jqDuBq73ob{kL5 z?f;oz8qz4fb#x)uC|Rt3gCgO7#>wS-E0w@y^OM0&$gC$O`LJC47XpU?q|X%F@)sa~ zgV>Blv<^>+%@)85`!xq8myQ+!`5!OeN*r-*Hx^PjwM5CrHh=s2xBKvs9(ghH{}x9= zzdN3$Bp@cLbyukWG!zJGAQcXXP`B92>pZdP9>%R$(d+sl{s(dGS%{}ua$)o6PgUT; zcJo`HBFw`bL&9NxsOYU?d&NiS?I)10IiddeotPNrTZbX>atX&J=+e!0<7H*B<*vBV z&>2eTO|$(_^rEHTz6z1zIJi%8V5BTN3t9Dz+!cE}`9K(W-)sYREp?ZBjB{`>iEDp5 z_W;&b^xJoWHYwWPUw_`f<`ufR#ewAfsgW_fY&&N;|5DJ=P%wNzJgyh}cR`#L0ZQMt zTg;VLluUKpp4%XO9Nnt_dt;gNIl^gvZxMu>vU)*zVzAK($ZgtYd~>j*o{^R$5kRNh z)D+Nix)<>kTbWxr4;)tN>bf*#?8=D$1md*-Lu1 z2j%|!ZY9Q(Qx4M_zX_`D|BCHMn;(xfm>OpI>-DY8QhkT9&P5t*^BrS@t1a0l@f-i4( zl$X$xGnwhl7SKi_x#9+Tk1Hv(pZX6929nPa@ehK2KASn!smZSgsw&VEDNEk$;I*$= zU9R@#7s84vKQU3lXId@%g>OXf87O$>s!b0ft6z1cHAZcFE)5N_0U=Ot7Uu7>{{(D| zZLU90&?I+TA4?Vo^fET(VpJ>IzCK=(9+$S!XmL*=)~?I>Jc;y$a#^^;Bwmw}yh#V$ zMoQ|7?-vpVMB z3J0nUUO4a9(y*pv3R*cjTpx{Zjp(+WEygMBIqOBe9 z?z%xI&rRwVP(1#QaT(1a1KeCX|Ao8r@V8l#pK*FltT&*xSHNhbzsOm?@igoVZ*OQB zKA_q(zT3R1bC~TUdCT%1TJ+~X(o0>GU3Wj@6gpcye`ttuJ)7=G3v1l^QJ$BnpD|nO zI?28`kbJT05}SL1k^g|z#tm>=tUIrE(npZ9oy$gCozCvc-}Xus>t44K5ZF$=C*R@Z zs5*lot0tHj)S^9nP_C`)Gjy^)<*EskOLFOk28uUaS=Mg%zJ@4<55LC#oJG*Do1P)6 z|9De-Y9q9GY~IKhtw>d`z*x|fp9fBY}msw&O+N1|U zITkWO6SM)nEAC*<_8g7QSf=LOmh*T$6y$`{joaD2G#-OgKG+n{I~vnnDx?rmA#ZC! zF%_BxZSsY`6EO!``guyhF69#=W@WH2Hct?DC+?UnC0uJ~j?(xd)LgeM;|xrq_5!*Z zTxN51t>2^BcGsO`rYn1el9fiNIGFN*wJfp5Bc6qH*5-0NR7}PBP%-Fof z1*Lo8Gu>>uLQ}^dc3w^o$c0TGN-+#tKYzo0(?SiyW zq=9ZEPqH*bG&@CjV`94;30}C^=y@$S1Bo(H{36A?c`a~{dw>Etk_xF*=AE{SmGqex znsqz(?GW$do@MUny;IX|A=&8qS{A5M*d?AQ_hL<%&~jcS?KqN!Zvd$3c16{H5`!_gON^lBCwz%$228pJBzd~kT|}`X?i;M$29Is1 z3P0tC23`3kB_*DA2PG}L@5?4r)w5Xoy)Ee1B#NB%i)cey?6*{xfTma;X;(m!yb>8u z>*@-zWp^9+?k$p~RRo%T50BOskDK5BR=JC9G;%Q)>c<^sg=E+baV!krlAyWdU8mi_ zKP2u^NVDCUs29F0!}NHXZWe6Pd6=@7yi=9ygv#X-#hF(6v8F9QVk_pakX{Cs;kMLOzp~+5P9yamp(D@&~ zrm#W{?wE9#nyoJV$_>5Pq2mRnBUlA_4U-5w;<4)BhFfdeM;652LpSd~3JaJ-EnAXt zS@jXcK`hwU+%#j|5)k*_s*421RK`cbjq*L_q)4=c9p^2|$eNrnh^(Q|H2*vZEj(`r zE}duaf~8jTDnHRlh(rmxkICH6_stgA5slyLDIJ(^Cf5#VH4FqJ;J2`Zz+ibLJzra! z@kgkV5|xQQKxp3`Gwx=!$F*VsJn(Dd0K~$ZCi>Vne8PrjOXs=fNEu+z-(yzVp>3MH!mZWWjAZ)A@b!$RD3zZ!C>7)6F)S)Jys+ok_^USs1;0Z~ zB&Kfra_En1{qhcy|KcI;n3Bh$F@u1_tpE}7fbuxI6^Jaj|4tU)_gBV` zg2U-D*cb^(ySC>HO!u+-S>t)FPx+Qs3KKV^)mkIxIrs)Vl;YOxn&P)Ul?d#62bJVR zIUj2#lV8)P9<)!g8f|@&>hVw2g^@2pJcTMAOc?vy{d;$oN9>rIHj0d8_0AWS3@Bmg z@)pErJr(feRlWke;b9jZ#7Ln6c}Cagi>TY)Qu`^ww;WA|pZE-k?0r*ba-O2-coyG! 
zZF+?}&6Ce>uGQlQBNd(qcil|$wejT=5s0J6dAO~)ITA~YyYIm+;{*V#>`~`%?56WB zMt&h{ZPMsaG`BS~;Aq0kY8<*b$^hy@S0Ne9mlOgRGCU#&H(Qr6b-0f|X6gBy zk2#sYDU|6w_j(m(*gZ1huHI_qJ?xF0zI8asY9;Gb;f45ULRh|M;zt7fQBwG?=i}LRs7KcSksTm5 zc+@+qiJxgM`f#8I&|6+Hr2de6)F6o|Am|L=YOV;+e$YXtVnT@cr^S|b>kiT(F``H9 zYEr1%1Sdm0rgU-h{8D{Jkhzv=?Sd_isFhm&b*=ZNb<>j|wIuTpw5qO8vE*Dn;GB>t zq+I}gjh#KWprr-kc0l5*0(Wgjm2tUTNcjU&&ItbFw8}@UAF_~WYw%I(*j^NqUzca6 z=)9)_gI;)fnVUTZsp|6&-kMQRZZ>m}5(+;#un8r6aH;DV4R;G83O)jqjiEq50Dwpi z`0N{pH<>ZYMP*eyc<~ES_?t>TxpjD4(Jaz8mboIjbeq`ps3?zB>;*ZK=h92udswNI z$FjMe4&tpB4>2;+zI9J#5h2Sak|+ET9A0gpmzJRz1a`<8!G*g^s#`w_Y}2oefLgP0 zm-RJ#+i@PgF^TPnsN3#g68}B2ruz3jO~Y5&l$ zo;wKLG;wN?cWM<|Bb)=KZnjRHtPBxXSZSzFb_hv7Y>y3 z?asRshK%4zlw5t}0{Qmd>0CKa2@DwXcW<>tMI6v8br~iOQdNb#mak$K4cE+MG@Zo9 zTb|jv$@J}zdRTOxV?}zpS7tp`R+Nl#SuSu8A!~TuB(i0G{8~A79TbuOv%MjPLO->j zplGsCW=wjd?uC|l9t~5}6}Cj~_g(8y{4cxNHgUhzeQblT*TH5>624Ez46-f5kqb>+ zIl3UNVQS9k(S>TIqb2997(VKP&f~)L6k59Oh6U`s<;7mlVAb0Lp?y;!eZ8rk;)9F; zGxAxzbL^f3V=819Oz~NWBzPKvzKN=JHN)U$&a*D{$rkaZ#0I>}OODrB51V>kc$A+$$yxX9r1;O;d`Qna3tqZCcKmaXW)oSh zKts2NsOmb_X&QP>fcX<`>)BEoz+9O7?OLIfZu(K8pP;QlCl zM|kApJ|fYBLbonbu+4?t!aPy(@=&v3LWJ@&qz>7JX`QM3C=>zoF4Ad(a`Nw`8~D;r zmzQ!$WP6!v&ie2th?Q4p4=yBuTA8iqjY zN*de22Ndl`$5mtNugHSLG>O_5t-0X6x3M4Jx80s!NhmzuS?^RtAUpJ3Y0sH!ddMyS zr-o~;1PecIY|rW-R02~BjEuG0+e<;9)j!dIitML%LI)|%YQt^_v4KqqBdT0E$~muxu=p?k+9;g>*qBLF4WZb zI^#A6hdzjN^t+EXx&O{#<@}lIcxc#8%EfBq$`?Dk-BKxzHK$KZFcLBRMbT$t)uFmi zcMqbj`g`V}`+IV?r6F0{p=FsHetxRkwjccHVK#_9W6N6dS*I75Z@yLIOcNjH>uJjZ zkX;N?-Pbdji);}YkgMPFhzW7S+9r{(LwnMEMv1W$)rwY^5{7?Zy;SL5=bB4W0irAOnx z!O$gWn2mD-9E56++87YDY+FT99Yw%O%-X%k)B#sVHgbJPH@+f*V>gw^Nnv=hXYs{2 zWVZ*eib!nu3s6E`1hU2_ecOL{TYi`k39HNG+;L#tSg3tD`e?$k0GC>)GUyZeWAa0; zv?w1w#2C=#u{<;I8)acP{zPq=LX$Z& zX13oGG8X@IEFD&ysSPI6kN#Tvs+ z4(97ZHSsQ?-`C@1nRMGEB~?rO;FC|w4F{C1K4ib$C8V*QR$@|cWM});lCXjo&kwS% zImf*`S!4mybH!!B=32dbW&Pj)@d|r?O z$ctL<z^xTISsrlXjhJr=a*0Rm}~6oKBOTsI+)u$hgPuQg6oQ0$FJ)S}?s`JD`3KKF=K0ZpqwbEO1_B%YYByl3&WlO}A4V$a0B<&R<`# zX^U>kYACd4MF}5m#w^L4??x#V0Ii){!pvL$PTy>>uAhIzOSa`OHhmtQe*Zlx6Lh0I zqwMXEO4pX?qQHbnbT@FicrKb%rpX%g%v{S2lt5*7q{mGYO3z|n3&+5mJAX4R|0RlJ z7rD&N{l>EioN}ftI96cYFB1*lM3>z`(J^3}H%r^;c zR{t?=4ac>} zqLZWEiBgg=pHO+4gxBX%=1pjg5&UXzzm|g&Z0~oDn|<4hp+0;5H*GQyUPTV+=CE(Ij?k?`Zd!Ssw8jKb0aY4t`KXyVjuuMc0U^e<5vZv zdo`7)(NEo%E$UH+Rc+5`>G){pRy6#uvq?^XKge8K@2*V(g7?$?lv=`wzgg>k-W_+t z%s71#J;B_mULGvR*I%+7r$L93;PcO=oIPHx03dt9f^t%UcUyB0hFliQRx$$R+Q>Ez zEZeH5KXZE3HlA~Qk6UDR`Umj$=Z-RMD^ z^}E7@FU>m65|rMicCkb>dZn_~+P& z3;(u7>=|x~oc1(l!1vm!kcIs3&>*vIq1Nk@YL(QSfjk^GL6vI(MMG3(KStI3_-}D~Z6I$Uz0<^+? 
z_ZV}Rh^n$HTPHguN(+2hjD}W&QcE>mKUC31nv4-lp)WBU)tMMkJ2*=nW+J5cN;Dj^ z--MQ2cL%@U<-yG`I6!}YdK+;akaom*BXc*Ph0edziP@zb+A#d3Irdg_YIe z=O-$}Kn1yOI-5NV_I)j7ww31 zMxTubwJv85)dXhJM#}u$*sH?PBUfzshQ>XpF22tqm*qZoA)# zw7_X!SH9KpD_gSd#dXlvZ=YZ|V!CQ&tuOhpHTZfdzhxNf6X9Cmg#VV&73u4nuGVea zN$GF6Jzr?|rPBpJ;pmQ=OCzf@Pb)quY7&NkbV_T$EH5wXUe+5T0#&%HzQN%PXDD(2 zd4(HfS7^G^>bbq^+u)!ZG{zY4?>#B`nsE>&^%>1rYcVt1l+2tJqugU$2k*;n3FFO^6 zf<}6rEZP?|HC_Cn71ND2oy<07<=H-S%M%7V}-wnLEC(+la0l z{KJq_96rgT)%atY8i^M( zR+AyF6FNX}-MBqhAhep+gp40ZkegfEuZjGgAQiH|mx@5P+N|A!KpV9s zfGGw*&41o<^)IqF`4MOAfIshQaN0|hO9YQgh`kQeAStn91@~+l%kPL+ksaz+sEHoG z5hLEbo8QZ!8W@b;$o;J$wuXk?bYWoFb2Y#)B_S4j4ba9pHY(O^=TCaj0cxB ztHIIe0_(fE^4`PN7KmsN;$54wEm9<%g2k6?Q8BPfsWQZ90KF+B3d_6HOtVD z$#+p)z)|hSP9|D}WI@77bS+U0INA5``r(9xKpTY;{TT-+8ZaHWkHW>VXo3f)xczrg zvzd}~sCE8sMde4B41QiaC&8qxTFx-6Qvy36#&b1n`Ocg5m(I5Zu%j!R$b|s0=Vy)^ zqmrYs>Jmc7Kn&eA@^!WmJ%zoK&K2SI+?G1hjeNxkPL_x*!M)5X)r)w}L~^$lcwsvD z&Q#0Z7$CN|m^L91S_<2#q50yc4z0jSDL(JCl6MZ?(L};UEmpzKu$bRaiz!YP)Qt_k z-kvYfLiawe;{8zfa_EoPEl7HxArPgn98$IvaZMFPs{fBRJQbeLa7r2y)gs*#?e0)pITx>eWRiHcWa40G0bKd zXL_T2eHQE|p{(yN*Ahmc5=g!SoVM1MYMp#GsqFpr1{(z2CmSA7k_%wwZ9qj%jt=^_l5oSt zxzz%rG0473P6#2p-@_SIQzeJ6=`JuN8XD6Sn+L%9SKVXqY$>JJr`i-QxXCGh6Q%{fzi&;G#^ z(aBL2UdGXqe%1p9_r*K~pt2y3x&&&9y-*19Z7N;_D0Za0X|kKgU_e3etWC%|h; zoAYXZb*v&9O|X+VJX%%u4~P;#KQLaG36>j^X5wb!Kme61AAtHU!qI>2L6nFTLoC68 zucna)?RkD8_%sKopZ&)3qh_aue1nxn^aCAH4bFwYqfT`>W>%o!63R9<0cGeXj#F5m zGb#@6J#L)SmK1vo5CQ~rHH#RD9u#w$>o14G62Vb<50$uK9do-Es0Z(2$34(Lp*P*X zyr|^t1lG6kp~ANHoo0>Z|A)o-zau#Le}9m*+95p+al~ICg2V8?hitpz*rvUrL|=UNb11WL%GSLoX)b3?6O`tOayn!J*;P}jT%sLsEz-c1{o>!z`ycP ze2f>Bd07BL_o1f2xQO92_iFSjXp2)60@S7;55r<2KuK0nyudv7v^0VD{9fGqoPtU{ z>CWlKg^Qax7L7V^Fs^_HNby2_NfvIROSn^5xQ$h82P>?vyVLN{pH**Z`W9~FfK8Kw zmcyiuin{scnVc7cTleKHC%19D!t1U?Z;Q3h2!qe53JfmUsK=`_F4^AE;Rc(TX*Z*^ zc!1&=q6(rmoyH=sMi?m9)nKOvP&IF(Ch(Rf?6f+HeBXS*d>_JHi5%xSe3c7LtgG!p zS4v1YHxVm45_>JOPajW06mj{O;KsjbVAt69^QAs#o~I8;&W7S!(3?klLpoGt1nSJE zn$5-VJIxna^%6F-mM?58!;{WYj|_U5H0Cg0og1{q86W-I;@i0hZQj$yVBh?wP&FH< zTH3zL0=bA{vBVQ&uXtmL@Q>K;`ClWFz_rbeM$usFtn))9x%`1ffIEa3hpw2!>2mphv~Q7PxKBTxOBa0RMD!ot7WH2>+pM)?dff62O!PIF_> zn(;q6u-O>=YhC^cb?`F@o+w)o9q{`J!Mj{apqw@fEO%RwsPi-NN{e}{V7dZ0Lq>5Brs+oSY?(6mbe9c;d|oL!ma;yjMVrj@S!PmlnXj) zz3=^bbR$iIT7ygBs7Hhj(VItqX|~lu%t7_)H=Gj37LAglCd=reit}krQ|ct3l0fn^ z&LCCF_rxs+TlF`5xB>$kG!o5EXJBwK3;~jwH{b4+3Ie&|$WFBP0mq48MlwBBray9_ zvVN(ZJV5iglMl^9kXQ0B_{jBY%v)mG?WdtguOOt?&mp%*i;_kWNUwBc9fl|7D+fo# zH%t2D6@wc>U!d^*5uK7FKLZR~6d^KQ^&U(7`g8g8bJ{I3@F)5 z`P%-;?}N2NrxdT*Ha>Lpy=N}de6*=uA7PH2LNCi;f18RpHvx7cIKMfpnL=k9{Bzri z83xCGwX_=ntOM^y8@sEfWUVfaE|77sXCJ5y(@S@%lX3#7tM zys>reMr>5_4*monM#5EgY=pt@{%&|lytC2YncZ6#$uXk8{HGlPtHTV#+@_u1i9+}_ z_%$DG^o?Cbo$Lh%rJ3D;?T(=4`9<$S+EN-=4)y*qpCv4tvo6bs30m&!fyM#_2UG+Y zM!{G-YN1TC)HQQ&cxC~&)B@Zz0hXnMQ-Hi{H85^%M8aD5KlaYBP(!=nO~aF z{AURg(=rXqTN?psrIvh@>bv%j2-D>LUHJEpFQy0d|B5o##dF}E;PBTZK2IN{Trw7? z>ERP<8W{}3hfkgn&;@}Ac%Y*}fFws2zkj$H1Nasb)SU@>1~3I&i@GOx_!rO)>)h+x z_^Q6z()pcO0G69f0@KUoJ0yXFBz(!Kg9V;LN3G=Y>F3y3)Z~0i5{2}c)r%A5un%ss z6Tu0IV9`Xd4l}%9?z|!q%qy2qLE!zOl?UjhPf;^f6gy3Ttj@ODcZ$+yo;?%J!Osq* zn3M2&F|shZ0{fePzI~ddSPz1o-vqw(H%gvwgEhunD?F+SE3-8=0eUK?jCy{-hY3?= zJ}m-USAz@GnBib%x4QZByMn~D#oHTf=;$vm@p>I*x3A~NKa;@6jTX^lNni*+RA7J3 zc7Ne1&-`}Q@5HNj%<4sn-KuhmhxTa^VCw?#)~1)+jWV-a*!eMFeDDE1PXbRTfvd;8 zX9?%d>kOF*$I_dysJ|eI-`a@E+w2bOXj;P%qdFz1bB~IF{@Mfz>TIJSI_J=e3(qq) z7uj@DAi(qwV2N>8|h_>LKLpJ;DyG(g! 
zpML>cH^+TD#w#dB^E>gLcs%pebaC>)gXaKq|9tKd0~7RlgLwAp?H$|xr+ajbWx*f{ z+?gMVLgUvJ184)*u#rH6W^d^NwK&dY;*hb9PW++LZ@w< z*9~2dZ?@bYDS1R>4Le6?M48mt?yN-e>TKIJ7tXDt2n5W@<+H)y`Zl}b3oj)8kiguS z9ZA0*nzp}#j#|j&L^#ySAEf>C`QxoXH|qUuR3SLjfot)ov@B8A)Z^Ef~xK3|0@k-7mM zfz!8?O6VEQ@0tq|y&lPk_xAWiTexp>F6hJH)7KZa-%G$-q|n5^Yo{;O_lw~TLA7_y zLWnv!)Ozri<@f&AN;a7hd78U#7T&|$py#?h0buB;uU!612mFq=(9x%J%X+ZH?lYwR z&Mk?is`X2WKa~4X5+1D3I#0yjH;c0^X%v{7-~s9ZMyaYzBhBEpXSN5!vm@wdN!Pd* z$deV$X8&9ZN)BS>kxixVSOWk*+1y=m%)tp??}>K<&Op&vQnP^(RaPqWSKBC8dwoBI zV&L`GfX;U9-2QA(UKt!mnX026)$dNx=$W6qnk7uX`LU<{>j}IS^peHeL@Y&y zZA3Wqy>FgW0;-F1HUaoLY+W2cd2>kRI?yF_f#^_O2G`uZS*Kba9DeFxjzrUxXEe_x zoF)lSplU2xk5~2%?At%CPow(LE!ZW?<);UxquMvze_fU~6ya2($D&rSZ~Zz;s$0Xq zBN1mDZzw@NIUHlKO0`@cmuSgdmpIrhhEbO}`U?lUYoW(mb26-XT4y^3O@#j6?OJ48 zg!en#@3H6Og)xU}EAJ%kmq446KP8g>X-ivHmQ$_zF~z4Kki`fj2&a8E8tVd7`p)K7 zfmZn%rv~ZWIoS2~D^dtF@d=w+vXrhZ_*op$x6~5VLtDB!4d&B|^D@jpdla2*$bl_C zH_0<>${5-@>_8k~%wrimTwo?g3E`{*F1?;_T=DT*T zB%xT^ZN9&lcu4s8yNC|--f$4Zc-7gNoanyeXRk|y3(#KDKQhf@jEj9XPqrgDyTIDE zmaDjJI@68CfCH*z&=&sD@XaKweZxBV83WBFeQ*HN%rxiZm(M}iZ<%Mh<;qu*pXE?? zfv6uzE&^g=Hn(u3(wzu-8T_mx{^mJa<+Ppa)6j^z76?hv#U7b`!vc@}E8z?eL+`l_ zVBgHq2F9#{w=*fb&hxX7BMYY~t!vep^Yor_<&b!8ElHWU%yEx#XQpmwqa3(f7Pvdk zx}acVNNje43x_eN#C)pej8Vva`(0=8Y}w><5lspCf0F<7?59M3{KGt z7Unn5fj(7Ik*3Qe**AWQ13D3I0ABfwX0$cV@HGiTq z8y;i5Kh#_>s~Rpx!`-yd*prxxv2}px3{!y2ErnV+yup2OcZ&wu(rIjwT2MbIV6g z?cLtEH0LIEfBRF0xLmPg-@x@1`sMh}VP!|<-zf{0%C&u?#tPgIRf`5^6*kJ{zi8}y z^W@yg*COgg%m9kf(jf`8ptsM`oNn#$meb1KMEBUwu%mWq=k?fUWpY4w2M2Y4Wwr`7 zg>yqk*?^){Xkz!{JG2RuN+bwKCaGqDX1t|$ghn^df7vf8(=Wh1xEc32^E||@uD)=i zx>%3+!2(Pf5VBCWI!sTQ;cOy1HnbdILb+2}pQ@mdICk|y*olf^{s$&V-gN<{@lz6fTEZ?8PKLw>F z{m<2bR-h^`h1nK3>Yh^vTLM;#poB|@5$IX|-_59pAfq+6s3t(N5QqP!D?9X`3+B_^ zAqoPGF~CT^{YMeRbUwkgB8ywq{ta5fOPME|LfLoCY^eSJqZbpu7<4U@GAvzzW0atW zJyaq%q-XP2w}A<8h4w#JKDepV`%YH_MqvbS68?Eu>uM3|3w9=3p8qHz8=o(KScKA! z@qhbTNRNu5d7Yknk@COs(36O*+w?B`1nd8H&j+1CfTh>;w@UWS*N2daWOW4@D@a>sU#6RzznTof+Crqp`l9;@C4h#!ndiYDng2QfTf|g2{*7H z<#?Pj_xHRL#kYZq5f$Wh&#JKSRrW>cbPoy>^WRR`EJ_W<7~MoXip<9>r#jtwm8mGX zQogH(NOx_NU%9!5&57Csz95OpW+2$lN%r?^`GKQ4=QBLFGi*=H(o%u=KCSZp%bTI= z+7-a5Kz3|!D@oW(_%QVd5n$H?XL$>m4uY&U!)H{{9oHzHi6dcEG= ziE6p-I?R&Ch$5VY0C;~y?kUyR_t9Q67m|7B&AzL5@`y_ugS8y9oY}RTz)?%DGsTYw zPnS`iav2-M8xG}_-IQyw(bej~1s@M?UfrX7EOXGctJ?M2<*S?JA+$_!A&Phm=}~7y zyyYsQ?>;)-7^y`7G64pxRP-@^>UoqFZ`hRZ$Ec@m~&EAAn$Pc zK+WpvDi&+KCZ{(E%oSJC8tTT3lS>aW_HA^5^*NU)M^+;sEhW2IzpY`Kgd#k;vWdOa`OWMEP?fKW~@U%Q#nQ|Nxmek%hoO43_JxXW}xbI@K?M@AKsVGOOL5SG(hogO1rP0 zIg9!B%0rma^E_^SM~(HxMMN7$xB zMKu2SL0*&=fC3U0)KlP3Pb@Ub|4sS4B(Nx~J5%w=WjB?EXAb{IVnHUZ>Qf?cg6KW# zFY{Ae7LKjp-?LzE97BYr?_L^~kij)hmE-_yuBk#sFes8LI?dYI|MqJpYtxbktwGGi zAw42;U3_{q+qiF_#NZviC!U!W?uRo8L?K3C2~9nPy6fSbAajLLm=-HmBzwy!joYM3 z@VncJc2QxzWoM!M?g4tiLiwO|UA8cc_j}#o0)!V{K(Xi-d^W24@y{m1k9OuhU093E zkU%SPNg*R$Kt>C5xA<#|Sk9vIhDh893)M0g+^&MqIO*#3yTWL`IyQoLfk)WQtFAm( z+>8AnOOdu4vO5*vwC6h?!e=069H+3jfUYqK2ZSem=bEGSeeQ|0B5(iFZCKW?mw-|R zwi(Dw7oEZJ!-PuF^6#$#`>*5<%)S1*o1j>Nu;nc3kOB_pZ0*pE}8eii(x17FuXrZe(yNE^9Q;mUw z5#F<$sdEP3mqT5fzm0F>KLhE_QW9vEHL2*o_~sy9q6tiGpBxIpq00t#vmaAU9K|CDZ5Vx0*;V1FjBOrb14CQ(fm0I@v*th%TYBK z2kW%7Gz~HtH!qf(y~IQ@tYc>(>xpuA`VZ;JOKcNegZ3mdt_ql;S>8H9J)AUtm-A7( z*@pdT)>HDJ{d6;ww&5&RbP4T0pZ%L^eM(a|@ex zRJ6a{%6@l+Q1%~|Hm1D%T;DZamvw7HG}y1v_qfx|np$lB-u7!Ggb2 z(CE*hbBv8U zwHat#_mJaSLmqUXsfh2`TRM8Xs0SQ zJ0E~+L97w_?U&b+4W@Xj_56c;+T31mF|*QqzV+IFjXfLy35?FuUlNk(DKTdLnlF?! zJus6vO;^=dVobtx|MC2}O#haIi4H>$W8*8gMuNT}EvgUB8pIjAW0sbKe@Jac&KK8y z=bI1Y!rv=vZ2ng+fbF#tyB$ouQrXF+)5=%iDu;mX&xQDN8Xr=d;`29mJ3XNArn}-? 
zAw9o$JA8{Jx+3ZGINRGNNxu zlL2c$rWlY~=}q`ySl%S*$-PiI4_a78dj>L(0^fi?0Ld&1OqYq|_cbpIMPA<1BWx3g z4zbv_)4)rBk;wmrU)S0yj`%X&&z^7~_Tc*ZwntVVGx~kC3A+9M6@S?<)sWihHPemv z!VNKZV!IZZbG_cqeC=1!?PQfQ3sH*rOg#^4s%VnQ>`G*p6ok=4=9UDMHxl?Wm-NHz zDdw^$AO%!fY;QWYTt{P9iW7Nn(||3AfxuGmsl1$!ciw!+m^HddwRldxk45a=cHkv) zWYcx*Mfs~j$n>hoZ9Ir+t~M*UTqN}L@50rh=w^m5DN%%;(#u5^|Bjyc`2MEq{SWmS zS}?w@k~V-&NH>`#E&ad)rrW=PDkudZ-~|@YBoiHnX=yXEOzfUp=030hRI{Y5=tj7W zxc{=$*CR9T&Mpq;M-O#{`cB^gQ?;bFj)IQW1&VpQ9r&SsmKCwoQnq6(FFI-Xi&v5~1qgcoER^O}Y1 z_$|lK(N)Fc*yKEXwLTYZ*BQK!NgtVeQvjIpS$!55PT%VwP|#zYo0c|@`Rnm0&+~Vv==X&tMKEjC$KrHv&O>qeGbR)41uaPQ)51TUau>0P z6YsiL+|P=S1IHpI?@4DQ)wJb0ZL;%sV)bVQ|06?>@H7XPlEND1)w7ms)@0aM|?LLXDGK-&k+5NPCUaQ)2fi7^Zm3TAZ411 zDZ_*sQFVJG`YC4H)|Ets@t;unb8|(&V^2k!ThqpULwu3PJBMOCA8c-Qn4;fERyqk$ ztk@e2WT_i2GWGvdPL2}C+3#D3P~xqttf41iknCA0zDeWsh~}w_{QcqqwKGG_MYBM! zZFUBLXr1lT)2ypvq!+lfGve{x$Z2w=t$U+88HyY;8xbqu@#uS>aMg5&c7MbEyq+)4 zxj5$^mo|jlaNasFL}#O>r-1RIJgr)p+63q{6gh0Sni`6N(S`X+eK$qG`p}Fi;RH;O zc&J9#?UyNGPQ*n28|D6mB+TL;`cm7^G|0v2+S2mwR;8-TOSj?ETPqL}vYnsw2*6ja z(Nn5Kx*hdILa6ZfX4{=8?A|p>HJEEL0o6i}ssOkWl?ghB+JSx=B=32tJnm7qw>i~$ zyYI+30!kS;s@*r4MI@VLBid45*z?w92;JoEC-m1fwm|iCt<87w(Bc5-gbBoI+!J%m zG+UMy&CDJx%n~7PD4wFx^_5z<|3pxpX{tJ%-@<=DUo-PX`FIp}ym8;hd*@HY4xy>O zq9p)-(v=4y=rc&%{&vx2PZ;X)v4o(6gy`F9u`qi5ZBzfLn8rK+wgOlv0L1IRchn;tPmTVWn>N#DqkV%^W%FGHisnSx6n%SA&aPEGw>nf5 zL|lwyFbyu--OWKd{SgpTPVXUSEV=IYXH!m6Tx=b(ej#DP4x+yGaEHTzPM6RSlQq^s z-WU4&+xMsymB=x<{CRW-Gz$zL&i@fHJWI4xN?y^}$#j?A9XHP`#vIUI-mquCySrqU z@^JXnT_Js-Fu(7&J^-J_ztoD?MdhUyO-7PJdb&dQ#qu0SoViN5YrKUS^hd*Xo)oc( z`}RNYIC&(m$o$D`YH6>RGgu-Sz&hg%Tb@*gkX{`1=potfGMqXwJ@B3nkmUL_a=!6H zg@&U0kLhG@-(H~tIC(QPs_~XEL}xpaw(+cR-X6PHAmMO7>2K}P;Z)}vB5Xl@?=<(i zrlOiy5`4X`T78yFV;3(k$*(U8lqM-{5_M3_XTFHplK`7IHsbfi8QQKNd1tR)RQ9d(49jWmsspB)#?Eqm`?0b=xzfgCtO7)a|nQPTi5?CtGj$@s)V(t=kf}6?`T_3 z%D0-t+2En8L6n!yhiFbPPNGA0fxcH!P2kuu8{S)RRqTmXw~H2XrWy+d{aF9Ya2ptG zvS#zo5$($?;lScY3|Hdsn8#FXGS_d9B#hoai6(Qy^RTH(z3eB2*s_>x)u`F0tNz*W z`BMG2}1=;pTh>iVwv_0gpo#e^)Y=F{ac zH?vWR_;J>5)iMs`SH|MK!W(+V3;QNS(`I|(=O#AP!wPiYKiTM zUVLtIxoVU8dp?3^Avcc}$)%#N-I7)^2X(%d+j}J(upG1J{wMqJwz$Egc75e6)Z+}i zW8cSk-&EMYFQ;PlxTIINZAx_26i+A!H7{FPLV{^e)+TjrMU!rKvNAL>ipp`ly#=@F zC}LO7>)hYdIx*;nXa#eztIMUg*-)l20r2*Nv}St{Er7L3ocaS=>?Yykc3U4==A+eWd(}RDV@m0U^HfBy` ze)>{FBX*WAll^)b;1v3U591Z@zv4$9>DC!tXmlK7IHUY+el>8-^rqtYCWVe+0QSpy zGgQi7vmzjM}_ghk3QrHba9aQdPw+I4$kFp)Z%=h<`{reZvwL!!D`=693)7s&y zm$qA;Wt*4=`T15mX{WEdlOyZ9EBqqMr(|T5+Ei9e_Rb4GpKMLz6qlQcd3!w{Xt<@{ z(eBcxvce!k)3qZgbU&SnU_9884F=aR6$uOj03{CK(7-)a8RSFp?wLDbk`q3q2^ ze{kEBA__wMW5&yt+=g$4F%-lY3-!6w4}mI|9N z0@gWAP4#k%A%(1(!p(~4FqgBhosRD7-)j0wHiTwlz)ixz00vcbY+N>FnLal6vKp5v z+99y@>3eR+TD^?vvPeT>KclG-tezgZi_4z#S7N1@`5KlkGq1dq{W6<8`o9mX@Nb;f zua`or=n+Lz8(`huYJtVe+xXg(sM$ziA;CQ&=9h7c{$ZcK-&`TAD%#?WA77uB-_0Vp zZ;^``6D6u2OIELaQypztt5zsYmtr;gu6zaEjQHnjw{5*v`aa#sG>%;Tm-Q0ZFdMg(FV(&Wl_G=jfKRxby{k;YwnU5XvR$#{AjQELX^j48n?(s%G zD!ja|NoyPL2CFbuO86r#V7_YK>Qvt@~(57g}! 
z&mu0Sl$o=*H`-(@S$3}vDt<${vcAgzGkWv>8iW7IS=>#0Pc*@&(CG?Y>tp5loX z^V^1qqa`0M)t4bjgq0uz(+Q|Ffa9b?`Wf%X%_cHdadzYS?T9H9HPY)K&&!A7wQl>f z%b>_cR&Gwg$EDJxOM^N!)-URhSZ=+vzR4)dLv=Gwiaw52n(J|wwDH#oe=QbA2`KZ=qVZ>77+m%01v~NJJ+3qI@(sUs4DG zIK^7`XFnS}r?n@Fe&~;Mj~~134k9y-kL=mS9O&@p1=xR&7Zp1lK&la+48h7ubr%dp zRR_JL^X_*XMONCllpe{e+;08fO36noc9#Z7j4wlaq;Q^WZg-eIjAPBXgYbuL>-hO8 zgq6$F{}Pq!Mdl5U15uu#^ZNaycx zclzsP-9fc0{-*;UlgLy6CsHiq9vzgqe0ekV16F&}xT?p$H@cxuE1jZ5cif|*+BaN% z-!<|=8SApSXZ+L2+^Us=p^Ai5PI{^#+af+BlVEgT9-I@!=pYcpt9yRruexvC98x`8 zwjM`znJ!-#j8Qm?zLVi>_cvx0l^_F!Kc-d4ij+?E4`nw2K!GX7^w$v*Z%;ptQX-oH-5blDcM zhco5CKP|W86LZxS22p2%C$#mHO8D}f;8M}y88WNm)g$*Us+u^zIMGrX zixVO2mVJa&iN~#&*k7`4(O%6IzNpyiUfkbrYG0z?6Q zscQGA{5kS|pZ@MTERNc${G9QU9zxN^lBNkS*F_fmJ}c*9-(FtG{UO^>RggAt zRPMRZ7xKnWAI3Y4gyk6ZD66d$=Q+4@t>bR?#)@UWuTya=()S{FKK-PYNOW!26DQ-f zZ7KvGC}@O>v2>Y9t9~6_EjDcOf{f_wuJO}Kh}%4JT%1*%@P3`$S}xad?tWaqoZ#tZ z#*2LfzP#K;1fOV;dF-v1;rp-s=4Kf79qgk1+$ZBu34^Z{LZ}vNpWb#^K`4jELtpiS zYIiL0h8hZdeXDO=q;~n1B?s@AwVR^h14tJ%{CPs8 zi*5a=EL<)fxOsd(e6>iY)>YT?b4Ua$G&#PC$r_K#&btrk>sbrCd9_$>wkm-EO?I4W zKA34?zFZ~H9|5WSH7Vn%(V)UDoWfkvbza)dcE7UYRg>x{55Y~&a0HO0sfK9SysunZ zVQ{~l?TpWo3c1H3N$4o0yszPLvc1vqT=$f2WnPJs5uEN%UlloxbL*_l$?^I~?P_n< zuWF^IUE35&@>L{Q8YS$L3iZq6b18^;hr9aVW%f)3o%CMXdm~X`qkg&SV0YkQTo!!R z5!@%gKEm$6>7hr|q<%w>5HURQDb%mZ6%rEWxyf&-=TMRnrN*j2Z4A@p+DeaQSzVt>wfHw5u~=UD>y;LHo7cy|rNf zPf8Rs5ka48GiFL&B=HjaBPV&yITvNO|=r+Z^y5kAzO<#xJ zD`8*3({k?k-ml{*Ee~gU%j^^1y;vmcr~H$8jrG|FX(+oT9DkP=V*}K=<13`LzaNkJ zc~J;KJM$Igw2~OS&$ZR!f)qcNujP(J2gTkT#kFX@B60I}6u1Nl8iUvug7K8h{f;n_ z<7dgo`a1|2$=eSqlz3OA#_Y|0NH=DI0_+A1sV+@)m;l6~=(GG^S=ZyWBa>o^WCD+B z%ZR^EJO;L=1`I9MRtVl9lw2-Af+`9#?DAo9Wi#Tw~}1*SFrROV$Su)&?==yXD`olHmtnDJuDRa>a{<*d1-#NUvEr= zRmHTL>2F`M@2*YYIkq0xn&v|P=p%xkAmYFH#OoDf#4!F2{?6{C4!C-!+ax!)SuR1$ zB_Diz4S~`!w0=^Z$%7TmJuJ#QyMt|CUiF;5?)^@R0s0SSH^*)MrBc;q%@gy(#qWXG zO^m~gD1qIw#xT+8gh0lmJFwCL#_*~ z1WSWS0~^J=EDGBzKfJx0-IMLuKcac3&||?inY*@wDcvgH!VmWjcAVGN))Ia_?g9e( zX^A`RZj=@O`i$SdrR@Qm(J-(2h#+^=oiu#?hb?2_@&S<@0g3&teXLG zq0#f_G~(~~5G%nXM}@xAJa=v*sV@@~j|cFx*NCObFd}P{i^cv3CSDTRGUt7tA*=?} zVMASCU3M&59cOc7vXrzp#c-EP=Qw6QtLykhbguOfw$JTR8^CiS@T;>F^0LeniO{}Y z3li~C;=?x}nVi$BUD48%F$W4W)=>x0h!%KZrwOM`*^d-3qcO1pSb?NW2`5`_?&~Uv z$Jb}2ZkrHbU!={0FBQF`AaUC@(#a#gV<(>$CvW_>#W&U@4(ze)9r2Qw0V`WW?gg7mLlNDd(FDH`qU~Bnza0xj8^b zlYn<$vVIY*N+qP4A$HWoqlKgn>QsF0b3TJzJ-2Cp=j9=@y0_RUa5cOJYgG4#ZxpGq zo|_9@+(7mC_Ql?27w@*6;C%r+&1o5uYbNdGJ^O>khI2~3?>V_$psM3d{vJcoTW$t^L01 zuGEaP3~ycpo3CyEQ(!>` zvtvfiY!AcH+q_=qz>W%?&A+R&buh1Tk`JNsV*%kK$-UP2$gNjqlsn9|3kte&)u6e*5DC8W^yE6XI@ zuBv#WeLQz{3%`7PILk%wtwra3ijX}&N)5;M7HJ^E5ne-XM^>5Dg+aGr{d$v=FvGjI?_@tqMpJDY+&EGf zlt0tMLn)@1N}G&G?#Xtr3}FKbOVFlo#uWU#&y0TdST>6zP&&@O4CkQLx(G}9uufTA zXjDRO3JAZo06a=D;4L8Ykj9bzK)Z3>2SacD_J(UAU28AtNm#)*=QI21zd4)Ahi&ry zG^gwQp3Vw<4zup0_YKC6Xfe+@ezE`5jEpubZ;<>W6B?rOS^Md3k$R;ZpI^-Q)r~iT zT!~*plaQYx`)@BO`7a9fBh$}cJ0AN^4o%;4JWUB(=i40zQlAsSF{prv%m!+5=D>1x zEiP4_W~H6H^`oOaRgHXt3(JZ>;JK{^-Th+`f;E23sYp2t*M6fTv)jK4`&3)bxYKk0D%%YxjF#N8J|@rJL3oyos)0XdHV5YO%4WW3-k#4ME3 znG4Vlvig%Xa@%D%D=;ci42C4q9cS`hU6j1aOtLnc-J|JH9$2r7kRofcSE_snn~%F@ z9BM2-3gKr@XZh48^eitTcT^dq2c*)apE+EPr8wlx^_@%)SRyfo!_AfkZFoN#N`*Kom!_;qFR1FsnP>7pRQK8 zJy!}w1Z*soRq-3XALd_gx-_kSsn6>^@#O}jo5bV%rPzh@S3UceD*Me@nK(Nswp)S3 zkLN(fu+U4}~S&YG9x%pA`aB7!hJhC@6$aWF_CJ2~J7HR1pD4Fqb+g z2RTQPF4*LKoOT>Fs^u58qt=E^#kMefZ5WRujMG)R7MEJ`Y^u==9h1K#$8=iRb)fWM zI-$;`bz(V+oz=ug^?byCPTDFpvzlEgw{*Dgv$WNm_O7D=Pp~qSA(!3~t z(da{JkvZ_VXsWe?YL}?VKehCvJtfE9jE+6);<4r<&QFgxfI~!S48wO1=Xv*gyE{5U zVSl=VaQ4TNBd%8+XCC)W13~NsyZDtxq!`>E11~S#uJ*ci^Fng&#^kAgO|MUn@~gh; 
ztntJ$W|8vmq30$0QuRn8?LGrZUd=nax=Q}_WPZTxfl$Im`ZkK|WfbI$&vI6spK_r9 z;{+0TxG5$o5*iFOWAC@Yb-!AfeJegYE)D&HQ<}J1!x#8@OAUC*l{s2bGAyiPX&X@O-7h{^&^$(dIP?bwh0{5%oJZuXOkGNEm zBpe$W5+8~{WSXyVb@^qzvJ6w-C&zUSy~z^VyiDQYjbR1WkLZ3H1xbLKRTv~}diyu! zU*VB#o`@HS;+qLW1=S@Rfj&B4mSsk=|NL-s?u%0yy4W)>j~z1baM;;9-QEkU*Yc5P zEC<8ljTPx|&r#LXCngVW^@tj7EHoP2#Z$M<53(cb57%}roX??*E>q1we)TKD8!|0a zuJ{+PX2)8rX6<}VPKRss(b<5&3wn)vF-2DnW zrL*=;QlpV|wjY-B_Iv3^O$uWXDL>)4p9+T5gF}mqU!^#_DVfKS`|(~5&EiDncLI=; z?@s3=eBmrav7>cN>#YfD)lrq3ch|V%+Im&zCeU|M$SEDskqe;Cq5ZV9A~qY5!oE-N z8xB-@&jVM9r)w7-9Kd>Ne^uAvt6QnMpPj%S&ck+q3Yc_U(I3AO{#7_+7A&cX zsax$L5jhN#w+48tyb~jS3IP$pF&Che%*E`&$&jSdtMnM{o2#EUCsWx1KdhNm8OZy{ zs-so{S(TyQO1U@~yY`!$NJnYY{=+W^WS!rw$e+*p=GX=m4a)6Q)3 z`7!kxh9Th@m-|h5mZ`y+6?>yF}SG4yhkL*=+2vGMOzkvn1P=&`oO) zy7(I1SwsM5F^+oDtjpI{GPdpdKc~8c1ZZ;AG&Kf!XE#_RN|-Nsi>s5aa~qVq0vkCK zjPHQ|J+#qcB5JYNK`H!=X}66U^=W$Y!4x=bp0edg1Q-rAd3i^vlR#Zw!s%U z7Vb`XP0cX#9aJPHHpWJJ6?9(QCK>xS$)K@>3}Y#jT?>t}H5ns2Afpc(h+E2i*4y-~uTv z*YGYwI9vN}*&DxHx&Fu_M3bzNu6R#QLe$rPoByDt+cBrTGS0k)G{jg{t48*{`?YE^ zW&Pj_w5`%%=~#?r@ZH8@#kanQPS9c{uAo8&+ZYwc+Cf>k9c;CkYGtZubpuxrv&#cLTFCDNQ-giVqH#*B7Z5 zd2nprK9MV=8U&4s)Cn8)s&6%~3biCiw0v28HrRaolhoi{wG;$HrE53%@p3U@OXABa zew?9@RKA{cx^C;(Zd9Gs5Qka8?~b_0{1(nc3&whh(7FiEjoQn6xaTv4ou@a(O%}WJl82)K8^V` zQe==~{?@taPorkX%M?u+v{li9Oo+)v>;MSDpVs_?Xheb_F_v|)~M zCL(m`^B3}N*XE@)<{j-&WEb&W1Oa;FP_^OQ^&WK-wY>r!rsE*v6kfs@&@rXJo7vv8 z>9;f%z``$tFFXzbE(usGH~r4ecp)lONKmob2#|_gLw?CfgTP{6SKO?-T}?;{m5ig3 zI6Lh@2gIjf#K4dL+*R?Gl_}EFl$EQYtm%f-A5iWi>zh6?p}QbQe~P~W0|%ce_wV3a zAsxhH*3pm}02TwtJi%Nje|z zmDye}T9TdrB>E|8D4pM;MBcU3*KFEoJSm$WJkoKJId`;j>C^AXYN+YTd-QL&(%oFl z_7Sx_m@q#2dGAq4{3oeT4>hAyS|;5W8U2acOTR?k#>oG1VZ$wXHzv97z@TXA(AnlW zw}IAcmwc=gAH+WWT;~%cAkiE{j%g5iDiWe~;N-e$u`9ARF$K ztnLx7(EOH%dF(|X=wvXxmBpg0^rzpC@_U7f1POe9_kT5aL^D*QbKDR;4QAMaYWbyU z4Uxp%vC)ypA%Cpn>F^ArbD1i{s7t7;2V+r%QB(y3_f#2=1oz|4Y}mD&Lr3Z=va%fV z)EMw3J91~nM+;a+*qTMkj(+npQpF=AZq*1pu zYA|V+gT0FM(@{%CYDQ}zt*V#WC}YtEAfHcj0tqik8DC2>ou0z7>@OmK2)nDyb&I(} zI&$Aa_5t^8l$ezWcIx3mh*G7xDrOLT$Uk&DL^>z_ptb zjhHySR{iY%tYyN)@fJml*3c=9bd1cs&E1-}88njK2~^VKT~9mLYfC#PEd>mXdI#_J;_i^t_X9>{-NKg$VRRZJ zg!xr<+yf4;JyD`@sZS{NBG8_*C$r-;rtdsI z*xCvK?{o=viTEcC<5ki4aqR>&UY`p4R117P44RV`zFU4Pyi&@aD~tHTOSPhBtsr+x zl1^-_@!x{ef+eKIzesAyyO-rULKXB{%_@cfv(u`wnI5;~`zAlzv(^Z<+84);IhbD; zYD7+wZVQ&6ODppOe!{9;I!Au`90S^al0S0_@BU>6&bWky1?8=UH+`ESW2!7^J#d8q zGw~D!0EjrT*Qyz?=s(glQBP5dfWHONayNiU*{a{lKV=C5E%u+Q3;>cWEU{XBCS%W1J5l3gd>(HM^)?=Ij_54Erw24Yuk6}gM`e&+cW{gHWoHke0gM%RjF3e^zc z(eCy6#Y89j@_5=`bP+xApfD%D`?`S|Zll2QyF!0oVxWF^UQuPQ%V z3Vr?3ZeP}Nfg_+%^4Be1nrFO0J%a2-3%0E(BQ(DAr*dQNRPDs&)=1Ff(Ap=31OSc4 ztoi3;2Aoc~sMtey#t^HU&b;!w*()ZzBwC)LaxA*e${e&Edj%I*`frMrd-DG9XaWEr zfg2~l1_0;H^HLRP+LfV0Yu9M(#4_V_=`uV{sZg1=LyRA~irk8(1TI(Z$k3!vgF)Y_ z=~roGR8g;;vCV)vqrl-iu3K`nh3e_$7RnoqY-!b)O_&I!Y_7wy`NBvDR5;Pk7V=$w z)*BO+pl-@zE$^&MXS%}N8Vt#O7*R~MIr%M7E#DYRtHNwrKL0Di7djS;CPC{>$u-E3 zOuNKY2grA^Srh0EaltUHkT=Dmr2x^tgfBqA`T}-}{8spQKp*pHevyP3c-L(SfpFby zeMPx^q}H1J4J{arHpJXXs8;pOw}YoThb3#=H0SZXBF-dlGV$PJI#15;lscF0Ares2 zgls-b_%GY-r%qwQGBsM+h$iF;>9)5;v+Q}%p5bt^J!aG>RYIF)PduitA?>axj?}~n znYEC!!(cxJmYw2$d4= z&Fy_{4Y5n|bMvsLM9^vCIm`3#y(#KDW)GD8=&^=)X3%Gd&GNHdm?lBKF#+e&I?DHg z+$QO^dC6GG&L{k|krY`w(v4Pc%bD3{X!Vi$@21Vj^4B4jNI?dti)}ljnCz`d{e@c% zg(9NSD}^hvKHi4)pVto33?;P0-K{5$I8#uDn|Zkt=ymAal^a7%%ucC&C`3IA`UgiJ* literal 38399 zcma&NWmuF^*EUQDC?FwHQUW5<-KB&yBHcA~=g$AU(i) zc|Xs6yzh_i#|H<;3^Ti~wbx$zJkPavq`I2?Q`{H0XlQ6p6%}MP(9keU(a_M>9%BQa zOkozX0WXg{q!qOu13v+eEhB*cpSUXMd!V7=J^S~EKE4P+0UwHZ%ISG(x>$R9o4Z?~ zIhlL9I=Xl|+F85=Te*AKxj4V%<>cYyWq)bw>FFxU#r1y+aJsnLa0%dD_oAV_L{pS~ 
zr{$BezvvyHrG5W!GHqk6p(w4m(-Qfz8w1Qz%K>NLt@t`-K}At=q=%6(p~3r50)2aEJIjB# z!2MK7WtJIKUn~18Qh|_|hhgn+;K8%_@ISNr<-AC!0J53#5d3EH_x0(I z-V{=tK#|1fhhTw3E=)WOcW&mc?P+N~C@`4S-~AK`k5DhJ5+X8O&Y#G3nV<6{Y0!Je zbFryCQp2Dn@bE9;?V?E7xV6__7IJ8nKPJP;HUq}VDvio+X zFr^2ZhisF?-?xtV#rlNn8Bv!XJT*;*tPXS?$>n~IK=pVxXUlEmg03YT`rH{Ayd!&c z%!-d{*5xciTpmniH~NF`dLnyYo4!6GiC>OZz+1uloeiu3yhH}9Cc(C{iZyP#_;0>npxf9~a)$rvVR@;ZSY;S|_-^wr73I#J4 zI}je@m`RL|TrSbSwf|asfrh_wd>0_9{@Ccx?d+CAXLg9*ck*(~`nI||J2Rfq7dsdw znB#0Uc27r16@GP|B%@E?wtm1+y9^A%Ii~#jcqT+M`1x|J9C{F>4sAZ5M-@X7W2xwG zQ|p?;0MYkHJ9LK+DSwp_KUa%2ke(-2gJ2_WMF(RDU7fBenwdXEq@dqC@@<)8C-hmyFij^3 zCTlZUv2MPGR36j3rbpwP63&V~qy!!Mi=&#uCGUe8k`9Nq@9`nOY5O;c!Ug|8=}2g<%0 zqFCogqKJ-h}D%Q zhnN|@#ntr+JjfDEYjj4|7DVI7a#GuJ439n%)}l4|ZQ{g4MdDx} z#wEC+tt&WC@?uk(=a@&`5t?cab;CSbYmqZZ&~7!Up(Z}bZpvrB^zWbEVlWdgAD}pE zhrS(>mR>en4;;u0gO=JaW-`KnlrLTE_7& ztzEyv7AClw^>%P;K02>EURr~3Fx<**P!dP}D8dc8Xg#7q?+K6UcK&g6sRI~*2??`o z7I70_+DCgUt40zZ+Debr(3Tl8A6RsY(jNZSHsZ!?2IoO)+WBt>XA@!tm_Fawo|D{W zrUDsUx1wa|?fcyk{nucFynrUMot1fr$P(W1;u?*!`}_YsT?O4yI1$>i#GRHI-IDV* zC;I%WLFZ*3)A7=xdwTGrYuExB767Z6N1IlBCgaH$ajb^jf($P%c+rWyN#au)q zC;hC)GWiw>>-KTEyh8-1ETeCs8OQK&gruAh`2UK=)EbO)MqfU{X^e^}9lr=XcLWDc zCT>EK&Yaxx0+e}?Dj3|z5^khQTe?d&+7XsW7%mS->rYLn3wQm)K!{MVGg&-|+$(}B zoZ}oGYjSQvidPINR~IeOIl6MB5xv~YNtXEP!F6xkZw2W(#L5aq;TSoe*r$y;z-gX z(O8Zc9xERYcJ2|)ZXpqc&}1^xv0aa533Xje3J`BFTd>VB z*pv`WXN|_kKp%JMM19t$sg+N}FX{TlQU#_6!>QueOUY=V!Pbsj`k<#r!K00^ZLw{q z){|%CtC?ffBu6V_y68a);vtu{yJ{;AE1&Hl-!g6H)V6wyL_CK%4#NYujU@{iX7R=xta+n70lxBET*(dXGdID42zg!|9p#hdFa z;b$#JHqGR*VgadTRWvdR&7DD79&3B=Kj4%JAldP~{a+}_zFy^ku@Y8l12BRi_98%l zguy)|qWaVeB=yN#rHinpnS&uxL7rGdxp~jw9Fw+9Y(fD*7*Re?Ia_6Oi(T^jtWP+l zFPmQ8Mnp8fT#{R#itO!sDgiI5<2xRf9c=T*JAAgxziIm%4@I{m;N6NY+3m0Vw$N_^ zUsEa6;IoTZl!AP7v}@V3c+2K!eG?$0J}(~;ZU&hl(F<0MUmjIsXX>%!v45w`PsFXk zz6_uQ5nPcwojpa(t8||$puI-p3&3-^N2@-?$^tM_k~K_&^*1}s8PgJ}&xZ~%IU#i$ zR!-a$wMkb^rNZ*@f1Q4^`XGB*>hhP~)#a;v`6I()&Ye586Unw~aPS_j&uQW+`3L;k zbd91XjZQxBM$T9}5jI|xGi3o%hh@JE79qmoKzT2%@b*y$*-YNSZ)*c}FSo_nrBW63e75e`lh`4{N!RX?-8vQDZI!hXA<5rz1)rcBh<_^-2=VgkAo~ z7cz7;$xE#K^zUJFUl;*4Y0XIvhrKxRBl8ycYox?tB{0ODr}ax;a79wMClf5(}jt zmE?$qxbmLMu2#{kAILhpqDQO{o!~Db)1$(|UN^@}etCgKtvyGYk$*4Ic=yr^j>9bPApis-yMXpQ8yT z1~HdAt_Cz-9!`I+WzEz2j_l^SxKotD+;#S^G$E6L&g8|VjMph^zQ@r*c^Ne>xrJ7`XG zHH4t(M2(_DV3u&DI9mS{F1L})BK(#IF?kmCW=XDbS$L?N0ee3bY>E zo&0Z+4Xr>l{f7a+C2ffD9pzMX5RC%UH_)!rS8z`z8S-U9AJDh)J^gfYEMChf7u zZHa&DoQt|P?0D2 z5}{)xA$h*?>afXKe7C9AW3hqxY*Sv;L!r<%?;7~k`Xy2#9Kn;1y!QePJ~xXZ-7bnd zA3$u*V|EW3q?oLp>&Ulmj;9}qY^_V82CKsaRar>s|Bd5^wc-hB;l235ovRQ)3}Yh! 
zqp7Ayn*0mO{}1i^@r$h$$}@TeGNwXONORNbvXEN+?~>K6pmIc#HlSPM3$Fj1p*akD zlgAJPw1WH7wJp#&^(k&tWZh?%-OLCJ=#QBLhXM^HtWm&F@1B#HV$&B45?HM|M`|cd z#{H+tac$0je2stN|ATn`|KO(ow}_Uk@V8dJ1yyyW!mkmHq=)|;f+y@;dFxxviCFB1 zs&;|xuqH(yM(+HhwxJ74rBt(3Yit(o-n`~|gq+Q{84&?fKQxpCu>X-_`j0vq9dlSm z-w^&Zo&E)`Ju^$5&i+RA>Hii;0kA-dKGMu0!UeL`3IZL8y<`6jD5gMhy%PW%mawz5 z%5EUqd{)YNtj)e2V2Be!F>yoEp*+PU!gcoIA5^m8p{)EyGmqi3R6TPIh1F)1WNgU-tgPvxhZEVWmM~m{4 z#$F*-ADop7s2-mB_}9M(H>svo@HS2i|W`|_I`atV#${l>aE)S{`O2@IZg@mem)jglbhR)*3L)ym- zVaW9*sm_+SL6)-C3I7#@7E0D&cuJMjlK9Uow6G7rl>9_x+NM$duV#^2(SMN|IXO z{zCS|ZPi&v_}>W3!miKoVx1JeNJv(QcPgFn5~W{$YS+{;xWB2`Ah(!CX*{)<)F7z} zj(?jLk&79GZh;2SB+Tsf%>0(8M_LEzrNVI0{-7FIrW?#l^K}x!B+WbbLI~%%eqMt- zQImIhwd$8#pQ8r`MY|R@qcJR#%&m>4S&8oXa~E-13(fKVIK%WnK02gvqI|x~S4b*f zw7km;xb9c;YOhJ}eZv!x15lKY>!4Ze?fH(YP~k^ANT})N5@%4@q!5DD^wsNb@8DDX zJ;N-P$<36979K^X@*d{o%i-yb-sbJTzgC%2h`0XjT0ei=X8Q2Xj&Ln?HpaGq0*SwJ+-Cs@;d40_D9`q&%qH<{b>6w(g0&bY8b6}gL3_r)MjQ3q|t}QVh&N`C%X;``1eugs& z_n$c2)QD%)gP_+NTTQkWRGi@zjg+^cKTIwb{oPIp3L4_prUA&JUU>zvRUT8lI|0q2 z;+H#@1!chKQ+5aarnREwp9%v|X1a;U1J-OMYqScHew@wQAfc8|)jDch=7JD`gz-+X z>+^Coakp*CAusk6+#J!EgVRlNb^1=}cvGTWBDSMRA$k8LR#J=mWf?w|N!e)l8_k26 z$2Nj1nItaaWjXD^^ejssw-2w7&-bIopKq@Zi+!Ow(#{!LHLm^sJJtBUX;(NZjE$4s zY&wi?_HtePCb#=Ej_31EE@6!hy^bN|T6>u5*DXgkQo}e7R`3SiU)r|@^;9~mDzb!a zrJPQzvGxBJ6zm|T3ZGwnL+7pjZ7LRl;1Lp!DGiYdrfsnc@HK2f#2ta{@&r0OskCO7xs02iL762&ze0vUJ zG?Mo&;$JW5nwH2;Or)MePHAdrxck1;rqCqj*3{_v_&aVu8<|YE<>7jFiLVf=#^(I`UQ+!fMp<&bH{UAbH%qJyr%1@KQ}{nTj776d3Ny8+`j}ip z7}q)W>J%^5TF(q}-BW1~DST@XAXjeY;7v-iB!d(`;v3qJg+J4 zcS9h30fg2|)x9^2f>h2n;OkT3F@C|HlAmI7T*aye=m^KDNl7=V-1>~%gG~BIvr2td z|Fl;tso{H9B^Fh+p+4oTH}NC1JaW2q9~FrUPae2`^zzDF{&~~a=-pyPv4RUU2K!f zE?<>37O^1S;;gc9v@&RHwin)8yMsmg15`fis)+g_hVVKPHbJn55^FSA*8`$Aa}s+6K~gd8kkM(;zw()%7-vGdxcs zh{w-v-7k-8GrkOHgFRUVS58WUCbW?9zqL2ro`--|Fv$5Faic2^XR5<8mjhL(PV22K zP1Tw#wd&JzwKBC1Puv^&->`AK`&EB$<}t!0EJ~n44svP*as@=~$qD7mx#dB~J7H>` zw-YpUA9e59dfU$1PtTpS8$lqs=hAo@=V-#{2{_}@P_v?)I9wEOmpT6f2=((lzJ8KE8(^F9Gb{2PTVorvHyu!(>~2jZT<7d(HOFI@>7wCCWaDuLSVz7We~!q2o_$J(`2`YwoOZY3RIMJ{+ib zrmQcoDK+@1bW=Mcez;vfj{kb|RjhV-x89@x?CFVH{O`b{|G^tgx)9Z)4PIx_ z8u9{@Un6>2sxq7SY{LpYKH}7Vmle2_`PW`rNs6*S)#m@HnrK?&W_v;!k~ZM}tcP{R zq9ipIcG|(Fv9WDgc`@K!ZgsTwDdfQCG}e4>ATLe)!5ufve6m>lvD5^yjLMhI6nmo^ zu6Gm?0=^cZHO;Y-#bf+U{~4dvn`RZgDKw8JBggAiHUei7W_I0T$op5|^F-vh~f#>{$!a-0DqM<6{w33bdV8 z^3F70fub(U*29PcMQ(*;0KDV$+k=%xCX@$m;O3myVYl5#H&eCGr$G}c?zu|j_i%$jGe8;& z@M}@~+qpda!ztEo2|rvRQ3@3wHmIs2kYbg);O`F zEY>=A0Hr2My<5?v0O>UyYgPuCAismbv{Vpr2{Gf*?}CQr$mTptx`lys55)GpgM4_C z$e0pe2`%fg^ZwdExq|V?UJ4!A*Q!&J+9S2?8 z&0*%o&Qi`d&YW_4+X>$iefFQikuVama7TA_k0EZhdrvV;e@uuPjv8L&@+Jc>^$f-W zAYfBj(vkiEjC^I)*Q3%>&F4LI2h;2^rg77ai=hv^9oaV$OqpqJF#;P9A{R;0irX7| zd);`wPjK^%laaX%Z1rv^{h>eoW728ns(K$G1k-Ul1EvL~*7r`CF#gNOLCyPfFQnL* z@~lwhZn~KxC;c5XHAj{eYbc}l=PvT$T6=yJa$hDs>=0LW6pPrvNP{}svFl+@Ad9!+&oy-Qrj z;N_#tJJ{tIjj#OMfI3XMUT~VI*J8?Z@`}I%Lh7p%zOYNDKBslRi9F*6qC0FaA<&Wr z@&d!(juzzK&ELc(kEfq(t}24~zJ~7<1PQsMbNcYnM0RWX#3p7lvIlN%sq168Wd7bu zx#+y=wEMI)!1Wi&LDNOigh*_YPcxI#)8;!{@k89}%^*l=&{bD3aY{LA+e_WLuMm9( zu}+SwT+6e?@8FMJJdwdS7`4Vmv-9HEgWN1=g#ZZ~45l+hzt>-Ehw@B>?4rThXFrOd zIPIm*dDW(>a9UxWqy5>2Z!(6`JfWLe6Zg`B19-{ta z?f5N>{M0tQ^0kqKvvV*pne?&=uDn^lVl+&XbJbVmD}k_XlM_|&RipC~9&Dc<`2Mxq zYh3fXm@B=f!9Xn3@>!(KR=~(3 zo;5y70c3!ALert&CmdHj+x|Wq9nK5q`smYAcg|glVEn&3s_Tl)E0PP<E_s_p7yZ zeRr!ejnxj_TrPJZSM@ONy3-lU4dI5<_U%+(-)C6YBCqnQ*U|_xnjhf)IZXrigH1UF z7-e#r16&V2lWq7_JKmfcfGqAJ@?0$9S_PfOd&WnQtyGygUaeXDvN_^xA`vcu-O*2*;B^O*weR3 zhzbviVPD@caq43^b=~y89?LYR!Ips&d-Zc0uS&`S+o!0}on;ae(FMYdt4sdEv=a#J 
zn?GBR&-2+&@o=yGKxE|SG0{=;v7EY;V1NAUJ@VjrIroREH?xiS{w2)zct8}IjXMGX zXt91k=|@S#Nuc9U{(woS&StpHt@a^$wCp{Km93HoWW|ZqZIxT|rtDO|V*b(kNqI#z zW`WubVpb6(75agMV6zZ;Qi9c3ZcL`C3FJtm#|U}2>#KtCQ!~pY@T28*p8lwrhpoG=SFhF; z6r>%&yq6`6ob<%5VNH=wD{Uvq#tF&J7JiD)y_)G|WzBWkd)`QGPiuU^D$Z)7mn(&E z1IU?v>k1>;w$(>SXD>UE80k_*?_ytV%TxY>`agNmjUAwg3;F5IJZB!h3wg2U6{cfx5vFis*MdK z)T0M?Dql>b!r>W*AAtkek34BMSKr9D_o%4f^D$^RDUhz|52l1|hS^+9^dOqMxdxc0 z4(~;oHcn5SlG?_QqRmGQ9Y2+A|A0gGx#d$InKUC#*R6~87BSWZh`8`A&%NK?BhfnO z>REwH#YO;{y9RTkx2@)FESd1b@B57SEyvpHyl&mczyG2g*shq`uW{+t&0C45_a;kP3PA0Cv`N3AROSD(vz>uLNl*UGEZ-X)g;X<6K+vO*ObHKz?yf9_i;Jl_-Tok5$sPqWVt<_CAs3m3fQ z=;mt6AkB&f?$i!IC{WSprfreDB4N@GOjczin#qvvZOZuC@;r3q8w@-Z^;|f|{sVZs?z9dT5%R7hZAh9C|qV z{2p<>1E@N!Zlq1g%^&J4>p_3fV)t||gLk|Zt2VyM+PQrUogT=T#owJ3WbFwQ=(C&q zuB<8xbKRV-_y3`@=uYzVd~xm|;H$qlbB3K0 zy4!T9jN7XMZQsSD?Dol&7|^K*4ZFyP;@mHOR(lopmDhPdtFgAUT*SRWdptFA_*`6b z@``Re#s(V#f80qLQsrlkC+7i`qkXee^sLKeook;UC-@=r$L*=8H8A0~ys_$@xfU?7{Z9q&(KjCGIr+SOp4Kv{OOemb`gk)4mMS*FI`W(Y5j3xnM%D)uGMNyz?|`tVVo)A z`NkB;M2l0z4f%bEdM3H=n=*PxaQM_NA+50V0~5Rg*X2)N@tT{-kB3u~uMgnz`WFqv#EuqK8+u6%(Rjru z<3!7S`lEG(`+9Wr^}1XSU%772dy%>%enj(&?T2TRudaIn(F0mo5(>$g;-LvHQhqJN zJ>1Bz32qTgE|No0x^$MF(HyADsUN6=n^pXMyP@x6`GPqh(h)INZZ+a+w&KX}_yifw0m|rtCplvx1u5U&g5CO$^KZhn~&4a}22m0A1tW_-% z4C29adHbS?RFxirU$#S&9`?rW9h#sVdVK1?9j_d_l))-anpk%0%$~y!>QUNb00u8CtiWF&#tQmH@vGJv;Q9 zs|3AfVoJ!YXz5=LU34yZbIW}I4r-?aVH<*5k-68Kz`nxRFXB$LUGPwJ>X#dr@@tr8 zq}jN2qbn0+FwVM!OgfZcAFf>=93t@BPVZal+NC*6#8Jn7BAnW^2KQt=T#5 zL#~zmq?=j%8zlDP+-vi?S=iZ8-~Zx3Ur!ZYj&&y$Pa1RW*v!ENi~&ihQiCqDs;h6q z55Inxt+y&gMP2!fvC_8w!B-xcrQk=z0&9*~ZqFw&?GdL)e74t@sn}=3M~xz`6- z+E=CjES1h-f$GzM?{WhEUW;Wcuq7-R&m}MWsVKXyx0KJ;_LHgMW)G6z454Um5a(AHb28L@!#S#-VoDeYF+EBR-GBPeKi3&f z4V(G$bB#C5*o%%s!$9DwKFLKQ**agd8pNcbv#RqEGQ+=3fv@N-?;y| zuz90v^K$~_Wyk9UIp6B}Q(6S&VtUyDRV}A3Xm{o{W%}an_k*{zagkA*uX)`wfR4Af zCNvt3H>OZ)gHFl7yxVSATN7=U-9*r(&%E%O`NAiZ?$J0~@3EjrMsu>`wuSQNb@l9v zq*8Nk-+7oHNc~*O_L|w)=WoCf*HZ6ICk~LTrOo1v?jz4Mx%ZfjH_rV_7t2^EfU|U~ z`8kdb*XHTrP(0>t#uA()A#=zh@(G~EzfjcFpOu6)@2lvHWXnYp~KA7Ghd2^V`kEg!%Gs}kr(JZN&AQWa>hQy}rDFGJ{VV3yvF15pPBnxy#kjZUlq7%yjt6c_tSolTRXvOE9$$+M!}G}r0%lG{^L{GGY{i_E>3 zNb`cAxSWBRN5EY3obAnJ~{*`=v zoynbo?-Ahgj>H3&BQE>IV)^1+rk)D$1*q$lc1983#WP`Aj^*owBweybWK7{AS1by$XRiNctJv#UEt^O>? 
zk>KO0(qt_r(ZpjLCP$V&^!|bG#YJxPo#AxFZ9(Wu>5tCVzF>6i@$S2t8N z|7~aZM(yk@!4ZRQYfqZtMfg%L?`4{#s`O@)}42PrEb zw96BLw0bR0ODHxK(6f}?`okfBemZk1nhzO+D+=Oq?FXk0eC{+JEG_6dwPuXTERX8Z z^cn;vMM2=8Q!Zn^21+8vR4!H!yj1!v&C5HGaTFhq;RT=&++KR5f)7qwd+t0LCCk=e z#p)X=o#xxc%$%JE1IYu~H_+kF!$l_&k)`uf^(`GRcwoAn$EmG_p_kdVa-Cp(OH8-a z)ZUb)g04 z8iKt$xH?n)Y!^?YyY7!NzC0?CQZaeQQ?H~LOOjiim?CM^o9!GqxWtw2R&{xP`>B;S z5{O1i#YhUFXz!*zKrvBZ&XWtMIw*5_lxF@;Rx?aHd9IX>aM&RF@y4e08H!_jRFtVA zz`q=&6iL1YO+Pz=#A0zR*(jpd`hLJ38CjN&?t0{x=_$w@i^zM|gV%hp!#dbanG!s7 z7%_vwaDnTpnb03Om7gtZTWwxr4f*$eA??;Jm;1(Q`gF>Q?kE@iScG|mN{-M zp2xIR;ybWjW3Bda$IZv|OdLzE)q8MaaAKGpOw)Z$(pCR)g_~?@r$``pIAq;{zXyoK ziz#wFtJC8(AA-<~te#T!O!1aVNCe`^+1&HTW!>QR4ceN}$y}w71p?LbRd`=pK=VZPg9CK-4Z{aCwiq6d z;OXExSxc&=;_hN9za@nf;IL7w?h(O_-!E#V<-XxfykqK_-e2ek*Yh+5+OTs_Xj`gG z%%qn~UiC)(cjLcyeLYLJ(-bJYqFR^Kj^L~VPp~@UI6fZdGv1O4x%uNr+di&iYrW}L zGTVZEzyeTsW!h|47kK9@PAkZMlL#Fe5Uv2Rr)1BDOB&l}y_-Xn&N3dyr)BosS{yt3K4f4E znOxqgqg|a%mzh4>+#V@!8d0&9zoBoet326xh_amwswc=6#vT&v6vYoSVf>S5-q;aH z5)ZgX7~ERJ-BwH}VrnCjn|2E~u`VnQWm%Y+^~nJ+pMKsaL!PT61UavD=* zg3^OvAEs<}S8n|PP%j6lPVQ%NjTApLemrx$`XYol?0$DF z5Wj;sC^s9->*&orq*KJxd7ic805?M{rsbJIEP5T-2yNWcH`s7MNTUV@6l zGa=i(KC}J4Md71E;u**IS#$mCi=2lBJ0Y>k>sQi%)`9!5k7UCs*s&;53Vr8wecn^c zIqdI6*~jyKx}Ye;TJKJF3$Zqo0VJTkmIJz`HSgc<{v`LY4mpMKLxAmHFef7`T{?%Y`b8s7 zmCN~KEk)bQv7S*l?|VC^CVq+_T@b!6v0kLf1aR1dOY+(FDnZu$u*@y^z}FKxmikU9 zG+qkv-6E_8T6Z!rg&9yabbRsv`HkxTPXf#Y_O+i0?y{X-2i~j)y8)fYGCa`gkkokZ zVJA0V2=6WVz{GAWdG})`cn{C z9YLI_EK->B`N^GykB3|=c#@6u9R?pe-?U=f6qr*?GR%?%5DT&7r%zwZD=S2EY^H?} zY&pAQ*oIBUUj&4H1&*Gi!IwH4Jf7P8Ra*N&75WXWa9rz z9N{SV#*Na#iD`%zv9e=evhG(9Z>l$l6-Z5biuV@`asuogVw^r*0qkmaG6X`I6X|@M zw3LUhLuJh_16LR${_R!)9$f$Y=Tvu~{!g=}?^Rx08jAlA2)sfM2deIsRl)`S?0HOb z8RJbLUp>`f+qJkQb=LkXK6X8b1P^QN?B+Dh2gHyjqNSw6z(k^{wgA4Ef35uiU^L%4 zP~w6C66v1;r9AFWoa$ZHPI7CMaJ{QK^792e(*|A!JgnRhu<$sF?`&;%a!TG_j(NGh zg_9*k$)=f-;!OAaaPSpKBr_!g&PvR*_;SpXw}GO#G}5iRc^&IFp6$=`ap1}EFk$;Q@-voacH zOERVK`WD)KF*SVGYBhhl)BkoYbSAp5ist=M{WU>Ulwz!~xs6EDIv`Q)bZA zRW<19#vV>4WZBK*=gRv1^IlTa`^io6HlR|2_4m)eXU0!W%58_s55&M8-QORLThMH_ z7+=OY%A4{dN`TvGH1)=uwVV%WpQ&04{4ID8bZ+99{;|Lu|IB(|bw{Rs-q;lL-5d9* zj1q_LIjv_B#>M8mdBehS)F1bounDo^2Z%fDC^wD=iixznrP<-?zQw+gp{ZiIyfj#w z33tl8W|sMNXtxUWcv?#SdzY}G2Z%u7lQ1`l#SPLOjb}0MqT%)wpx%*T%f;pg3vb$u zsEH=Nwd92Kzl}yebbMc2>SH$Re#PNCtG70*aN*IRqJz{xz2Sc|2 zqw`^2PC8icxbafO%S6(#x?&R!*2&+G_OOa!=CQwq$~hIoOljz3fF&Ohxn1vkz>i?h z@785PQUH=?`_w~AXYhraxB%pF=3EvLu(<7NIFNqM)4YBKNLg9n@Jb!Ie@*1-LYdVH zCupq9V%%e)xpj-k$&lQ<%C`sO&*V=H)eo8jq_MKE(Ao6GCr%?;wn8gszP;G$eJnmQ(Rbx$`sIU0qwFI)G%cPf`!8=hS{W5X3%I-a9&q3&P+Yzr0ItV_1oMDsG)KkMO_~OCDl;?M+S+1_q%dI=vzz z9uxau`JP~EUR|hTFGFJCY!B+by7!voulPKyp4f3vL-&=Ecu4PqQ~B%G%yeJwTwa=r zHTILP0)xLAVwSR~doqEE^vHWX7siKMU4>Dnono4><>x<#ysWSblM4hMUYRdt4l9^U8 zvILO<<85bChIWcPRIS%v7ifw_s^p*58I6XQ9mL^z@o7nDO(&Udot87+5ls`Ea_X?s zdIQ5+b?Haa4VYBj5(e|Bqg-Ky%T^U{6d>BeCA=vG0rS;)v+vH>7<&}itpJf5G!wmB zt*91U?=w`+{R|Wmh8@4y63OyMUoO&<4sEuQSUB;*tpXyH|7ZcP$C_fm%Nxv^$!1w zIS-=*0_&X*nC_Z~zUEqW)6KI-U3AKlrW=l`B_KE2m_KAaCObM4uC&t7Za z>%M@3Eud%= z|0~w?#8+Y}q*BAM!c|3)+pi@^llmL{%&G>+e;41RKn}Fhw+@S)%+Fge_D2>;C>`o= zMQ2tv6W?LoE5?YK+?;Yld9lyju#AP&YL$(mW_<~Ak}u-fk= zEx=Wxqa1#2J&w_bTIn#rYo*w@9=FeP-gTE1iiU?RZ?_I*PS@@mEGt$exxDqmi>8m9 zUl=(tPPpq0ddHI$UcQz&_|p`@9&}%#+y3j91engh-=BG6_Zi5|i2W5MHI@>23^uWN zIWVrb{7P&TPEkQk)6~^8Z)B`yb~TMsdCi0Ek<+dBjWnc(O5@dubMIPRgFi-`ZxQVf zpNY(4K79l^HHMUs$0UqPUBgG|^5fNi7dQvuq7bnn)$hiHb2X0(f3GXb!0|-yPahZX zqQ*tjn5LL>3+*ia)5?OS2rhAx0(uFb7b=t8P0D2HI;7KelCpsdqxX#^r zgU6u3mb)c$_mGzF9A_PIM|5Po!sIy}1YWV+Uk5sbIqt^DIWdy^lA9vNWs%4zBV+*6 
zya4cM;J@ms_Xpw@M*RC3yylROl)I7S|7RpYLvB~pXTm+(mjRC0Qhp>p3qPEJ7e8^= z>bmrkV~*qQmH+R$7yl0$9RL45MWEp9aSTv-njPXHWBP^o5+Fc10ad-1(H2}y1LTw9 zuH9BxnD*?3xYexncET8`)|C{2m!Zm*Tat4(34ib?fHuiNEYRWYbR+tVwYcVXC6j~S z^2fUm&s=gs?twsVxa4$UCZ8!>xQe68IO-F9C$;JtEOsd_AuXLX~^Z4F6I;i3;Sxf&zxhhN&pzip+r; za#M~wY+xQ?S^H4X@I~JxmM>2wtJ(s-F2)cPV|;;m?EQGn*B=hkofyMBKFED3lhYhx zdMSlJ)H%Ca6tB*!dMB@Tzqkb)3q#27*t+W-Fd8d!_(s2QQ!#h;=ZVY{B=60GqKb}e! z;9-D+T3lW{#+&E=(OT+U*|UUq9hVBSYD%QQT#=ETzm?Gr?GXcwwQZ%$6y?q){pBMQ z0n0$iViC1BpZFjT`RuMFKCHQ;Q^%)<}>UbYE!1YR=kkP#&rp712dl}Ya zWL<1k4f8)L6|2l;pavWL<0{5q$w z{*(^pl>k}*eQY8e0U`{%4NEhLUjSbOf7jI)SNi9|WK&=6I8F-j4RK!T>T9H)QFe1y zHbi{{G7NMO?K8>p`;*ubC8b5S5wH%7SrM9Pk52swGtla85{%N>u~dY19xoP$W@0mb zGLU(^jlJ{Wtl@|7wd@BO&={34iLQ+;vn>lqknEgf3V(pV>+i0&WsrFiP!nUK%ZJjb zId!wqR;;~w6obF7q6ViOocY9|1k7uZkr-+B^?(fMQ4HrI*P;qU>HEYq5ag}hZ0)W+&9wH8LeQP^0C;0N99r0S6CMmp*_fV=UNnlcYLJ-79ct#5_ za(o9l#Z3tO9<^*$4XE~MVMK=Z;A&G%yr1_!2UQWErQ(kwc&oX}*q%NH{mI}3gA1|Lj7Hq8$5md){Pg$P}q=8JMd2EdWLLUKRLa6#%EAQCb* zsymvgBnL)0wp@*0I5_}RxjpACdW{w z_WiDMLDH{4o;3@6sHH2}TP49RQ|$Br+*Xds#O!zlK~OBt0A9v@QEUl+DeN^5p|9bxK?Zd`^wf_$hU2q(n!SB3VjnN%)>V}}rd&2hNyaU_M-!R% zC#2^!nP<%h`df+^G2cvgsu&4f2FI2;m zTj@ZtWUHS10iicMGN!La@X_W?y8)i?b(aGPG#eR>Q~k0}V3D=I{XGUZ9CvI_51}i1 z!mD;nGb|t!6uW$RE{?=loHJbVR6Iu0?C374kckXqs-tnD=^#%2H?4joI+^P1ze@4v zK!;T!5l>>Gu#w)l!qPHcIA@1ukMT)M%&~OBLx2*muEwQ{W!8UXCO=5vnm>^^8Zs|h zHiaU1`6GBcyQpEjD79zw)a0uI3)n&7@{C9X0?VEbvgLo!stp+kha>c`n{f3VLsBvx z#Q;oGN?l?T=yJoAg+loqlT$(tJB zcbcpooBK7+T*IO5^ceYV`#2;RJV!t}2^s!du#ns`6}6ONewaDcV1DkGsOmUBvz z((`>L2f+oJSLr7TCV&7FNAMz7cCF8p$I+oZ5}>Lfgv}?s)#Y?Ys|(r$59w~HAAe{) zCNQ3ii!YF7?R>~>eH`+qStVi-y!87)(;kb;WHhMaO?k3pE|9lO&b>41xD-Rwn`ego z0nkdxH<*{MbEGaOzPB%%U^LK2F}WbVn@1BD4_%IlgZi*7Q3B>m&8zMN zKOPQXB;Yt_bJXL{7nuM}HoFwY*X0qxOBfK@`Uf7Ii3i2PyWl2UK<4DpwB9EZp%kX| z5c(O#rM%T6*rmTHHi-urkZFRwzYTJ4MKI*X-;3at!s1z?|AyT}eEcYeKcN2$RC}9i z@tsjMuleVkZOuwWd~gxKM)Q8rbur36xEhfZz$B|;S)WK z7eyYE875zZUbzpx?Jz#aa}Ep;z;@NbA|VqY`0_f0zl(f%SIZmMqn}!1<#O`z&v!C= z6fsI*(yotWB5m$z*0L{~Qj@n1~O&Rl~_SGA|#^S`@GV=l_>4bfuTu1SAVD|qkh2s^sNQT1PJI8;v9Bfr zFy9Vol6(`i<$9k}W04;_$*u{GEgB+U_c0p4>A&u1ab!R1dIN+$JPuFs?UP3WyWG1e zoE+)_>ydkh(?=gg^+#ca$Wt!huI?S%c8+Y!zJil&g zdF}B%6`qf9EN*9bAUs7wcu^vYBL059mD9pj21~4Jfa6C|2A~%8t^L0K{-0$`N%!e| z{(OQzH5J}-BmWq?sc#qb&MJ@1^Uuv&)!pY$MirqKz&-l8in z0~WBlk}US^gq*P#e9wyl=sFzq0bUjSI~1?^UHlFYeK?}yyv&67&q_-q0^Ug~K(#Zf zrBjuvd5KlrU!gfgyO%BdYBkTRry>Aj_11>B>*IEilLY~4#Ir13fe+w2c?(q$Wn=4Y zZc~{fhC69UxO~YBklT3_xb&UZl$uI&LLF73XAr5*9`xmoGlvA@Hc&y<*oOP;tK5$1 z7dv3U|KZ$t3gGY;Zh`%(FJAJnvF4Tl%pv5|(cN2*IjP(O%XKSu`iJW`v&R1%eAV!{ zK4&TA25jvoS$-uwGvtu%aZhan!S?22aRSg?F@A%*zFI5P@!V@3*4tldw3$85o-hMg zV!{C_;g~>7j@2nfO_=Gb;Dbw#SGMdoCmVU@y-uDB2tz8zt+~R~4M$zV1$zd2mku`3HJbwAnBC_si>=TGPlLvTl_L9DeSjVW_etp0Jz#E4 z-o7J+_i?aOd-VScWI*oEtqQi}xtDf!)re>T2tMY&ybtWNzhhKHlC@#RBF#Hf3c!sj z{f38v%EM3h@-$#&l-fDKQvjZ2iDs=Q#rjhua*K~MKd@Yz_O**}W38>R>XKX9keB4` zzJv;cbH@KO#LXAQI`6=45%*K)hj{jGIY_U3Kn!)M&Hx(^90yO&0C>&6Lx9&jIo_s2 zmnLk*$r43t?Med>j=W4U?qg@D|G9txa0_xZ|4!^04u1%|{Ldx*0C+ya{rETv5%%qH z3~17)N2se&Rx`8ba%KVNx@^Fs@MWskx5>%$*g%Y7JwH$xFFg>4wJ^K_0^mQ>p1}awT? 
z-B|84ibXvo5EqX3(*hv*(FRC<*u_ORC*Kig@c~Z!Ju>u!A(_ai``ofTBf@V|c=Jpj z>m_|AU1D{2Fw~dM-9Rj(y)?DnZRdfc*Jt!ob#{P2f{lWqiyC;xG{$?tlSGWr;;56B74!{(t|bSAW8>oFh@%78#9=r=U~xDzx`kxPfu1*J<>Lqrmp zJ8*ZRzh2`RyfeB5@uDGsRBBu1Wn4u~Ys1L+m+khowu8u5!sfB~fqz+(wyw_=3U#y3 zb7K?wOK&xB!x}Tn0hOjX5o?SS+SW`E!TYbl*A;M~u?Be(O2(|1;G&B9p^a^@x2(=H zt(LCLz)}$fJSDxg+Tn_ZZ%JACLU-OrSW;h=$=HtQ+=CoH+W#IH?5BEl5D@@8g~oq< zjlv#Yyv9LuR|?+$@lV2%4!Mdqre*6W3I4}XkyX+Tigcr~{C`~(;?Su7PqeR;n+XNL zfjdWsOikx=M~uOYp27Qp(f|52p!VIOU~2u3Q&#`8x@1ajok(D5Q;01-lPHq^gcT{s z>@4WhosO#@8vpC0f4lQWJ_L#B28&zNO{X_YrKL+<9fy(}vyBNUi?x?dA5aN?S$Fi7v4#R>&!rKusInW{U76f zFXQxDr_(?9rRv|Mg8M(4>K{ZOU@baM#>A$Oyb1H@-n=6k!v9byblEgFEx2qwu>;@e8!G6d)_Wp?L}S;e+q+F$l#j&x1!+N?4c$1~Gq7Iyp9a7rYul?sb4v zkONki7ez?6=^c`Z zTcghjv$->p5xYA7t;-?pqbpn@-IEa#)=uaK?V$3)g4<$rQy-Xjjs@|2o#qNhj(jc76(?=C)0WnNgUY zbioqyWt?AuBL8(uR>2>|uybyfU4jdo$yF)`BW>i$q~3K0>q2#$1|I4&AhuzXi`%|N zRUu75b!xx$FPT9dO9eT|K~+JYW~(jRob?lueit8M+zhoz3fp`+WB`^86dIOb#zql= z(jmt+Pm*eR_LPgv_=VtLdB;?C(YP*mdx&@rvCSl~NvL^i-4keHtD5**vfON4*X1rID8BGw9Vqv=(q^%t*`+mKCpEiG>t;Ls5B-isTT!^1lTyn{ z2{)7Z_iejwX+goWy~kDN8NwgVhAG|c=0_az9t>inL*zSbl5(bc`u1jPEvd%PSu(Z0$;g|Ksmjy<39&!S>U&J5O#b{t>E4!BMc2dlrrN z_)AXCC!|)fhXaU;^Z%(mc9?i}rezvkotOa^V{xV2!SZ&koM8T2^TohTlWgSJ#&La; z7^o<)3Mc68+4dYcvaQ7le6FgD>zXN!{iYoL3qKLOxGnzL6GVdKy%J#9iWt)Z2hXmV z0GGv6?vC^K+}GC7>TUR5UARyiMpS5VtY&aFC?E%*kgzPHbKFHYk<{yC`E{z-nZ^Oc zxAGO2xg?v$4^Y6Nw|e+WlPFQL9Nu zs%>_4dIlL)Z^FGAea>-T_f>pZrMgcOA~3L(io@fbp)Azy(*l3A^t`*ToMATJ$JI2XTq*@jH3L2k78%=oZnk*vO+|WhiCsAWTm^@$d zgWk0gi`}e0@%Fm!6bzX2i8cm!xM|-YR75)xN=RT}sPYkEyYmNL^1;CnPb+qkhSz$Z zHRXNHdf(C{a-RQoU&~MP_`nFLUCXjnR zM~(bgjJZDJh6(C1DP6KIcVv{(P5pKhSffiEnKtZC$Uj|K9cB4ZRQ)+D@x*OR-?wF;}Y;W?)d{scfL02V&|0vBp`UHqWDcUE|$JZxdsd777}r#DR; z@UI{rw5al7H#@|D+d_g)hbQ0V+C;x|mU>hSmKwd+7jmQ`wc!+@U>M*I!AHT0;UwOl zyypJHw{y|=SY7M{E4Caqp$C?GTbp4+516IpH;P_NV@`)Og6Ed4Vq)as7U6BLB?5_j zG6QO&L2l%`3vGMK*3*qvlIa@T^&9njL}KM)5{LMo3{t|>Zis-op%mX)x;@4S#MReD znp!}0xU`_2KRfr*6Dm0i3y&HftRa=d2lH}H%@~{{Fz@)BNP6iXJ#zTDIljqid1e+B zT-|AOZ}?gITf2jW^DSTDxccaJ1#iN-inUbILS$oT8*Z{b->Xdi{EeMNE5nMLvev8E zc=9x6v4g4IhcRG>XC3!|BH;cM&U39QW47)uEePYyMUJ35HX(DQTNug%^SXLk0|FPy z5+FgyEftpMZEN{GukH^}c8M#D2BiRUK6P=eUdweJ^3^R!l72`0mDRKBhD){!NmxvMMJP1G^<+`!YcuTZoxxBca-mc=)IfZ;L$P=yNuT249IrbA>k zleXpdr(n8{{=EZ#Qd16zJ2_=vyrunQveLN(^DP;_&#t{w-^rO8mml7q z9{HQ{WgHXpaks$qI*XKz=PJgnj8BDeD*3XU@!jQsOR=KHLy)d>b1^*X;^jGRs1Ss4 z;Mc_+InZ*;!xodB+nclmTq2Dfp4vyEBI3ixhMpd&Gx&|2v&y+2i^toApY53N)P5ROs z+7=*LA@eC>Vy%Q_UL)K3;x`aRG?A%Tdts9-vXcT8a`R^YQGZRwwI&rvqz};lWT;@Q z8OAzweZ}pBtf_ourfYxaXzrPtBK7Tra~UNLRlCi{`$O6JQieRH`JAVYw#s?!sNB4Z zP>AP~I7d8+WW&m5vj|SJogeprG0_2IGM4yC#2UL9QCZl^-_sgUBvl`)=WD=a-Et99 zMXcDvOHw9oizOX|$x`fY(}v~6YDPc9AsFN-H`)&9Y^3j(TDq}5;qFcZqU4<-p9_euVLW5L4)61&pLS$X?*EG#CiAo`sG}kIS&C;wQ%b*FJM4*EV z92+~S!e8>;4erI5abX3p3su@DzREZ(E~^cCs`|Z_ujVTWoRiXqt9&Nw-RVGs)DkT2 zOR-USWrH>Q26P@meC>E3?u&eCVL-o~$dlu7a?h#I#9^3Z&O=+uiz>|)SQ4Q?{|s<( zM0dvm!f%!pNCJ=uZ@kP(A!snb=t}$PH?1LRCy)+9uM8&O0xtU$fw5*Hp|u3H)Yg>} zzl|3YkoBlGM{lj2etSAU86Muq(IQGCC!&(eWj4yTzB|% z&f7@}?-X#VEfoYF+eo~#52CSwD_W!&2PZx_*ZREsQ}I)J#-rL;U@lf2G-FdH3YVO+ z!3LvlqA#a+r)*sd%vk;Fb#Y2uZW#o5ZbFuKw|f;!*V3LF{MqP&ydKb0BCO4z`r+^y zPgWhYia?>^%U@T#02g-dVgDkqtpz>R?fI+;5BzD=vKTRFn&_I<5Mf;CkC()4Vrgvvi{OR-cagzR`gXpHuPTg zOFrAaW#XvIo>+VRxE@be!JMRv8cSWBpEeK8m?Az}Rd`v{=X7do(4E-n-0P&{FypT> z=eIUm(@#x4wh$_7Mq=mdfj2yl(GXp)C53eM0)^hLDTa2(A(6Yo$OYCGlgc(p!$S^% z;wN@>gb=0dtFX@g=u3B7pdhgDdGq@!b6sI`xXOJ&T8JP{NRM^v{hyW&Y?-Z_7kdb) znSGB%dhW_&LR8&x2ci;fFc?c}xKa`d-~ttA;N-n<_Z5duJG?u5UR-bCVRGYp_uwZ=Q+_l%$C$jpDdd#tz#KyZ$tz<<-`5s4*+A;56S%B>=nV;87Es 
zUi%pWQMC~{^f`{6J=MJ0TBa~4(nIjUw8ng1XnQ?wDoL7 zS`7aKis*@-y;3Up%_{j~bXU@m4;YDW6!Ea3rn0;x3)n@)yB=|aph}hI;O@GAsz9!P z4GZeF6{N#$zUbU>@io(X$iLT^a^y9+e-Z>zp-JkVYgd9^>~HfT#IW)6_7LKJq+e;O8RhXq)oMBa`N&Iq-yFfHs>8geLOq@^4%h)|&(| zB`-~bD2M`~*`JT`A4esZ=eE8f6S;>@f1`sFcQ5l9Xg-3s(HCq;R6^L{xH9z1ZZOL1 zrn*aZTQONsGJJMDp3m*$r-2NKi$B)Io6rs^I=J0-(I^lPbXAnfH{E?#zy|Rsx4CgZ zy}2OQtvgM;jTqf4%T<*Vo9(lrLGbVw4n6IV2UI%AZ}BKCH=DDJc8bhp_8A4lj^_K* z?Sau|5~8BK;q#QE3t_$5i8gSz`C$tqZ0t9Lm>=pc(V@bij9KX#!)z=iP9W3rfT@wP z=w?yX)f~l^;y#ZE%|Cs(Y5vP}xB^HL0BN=mB|Ow}v+^Y_WPp~;^O3H3gePOYvxt3Wu4oiy~i-(50pB2b7^~;pz|xm_PMbl^X(^1Rh6KRYSyiLs|DBFaiIEk zpxMPW1x5AfM64$L#I@w=%yp}_baC}2z5ZnG=D3G+;j9^Y$pA-cc6`HTBii89cgTsv z2sY59tgkK|%qLfy3KB|rleueS2M%S(z!-e9P9;;~5)zaxNAm*lewX%E$j`t{xgqus zZDlX-2F)Y(ULB-7dzrr-cmgt0XM8e#GI#)XOA~}k_+1-`VZc+Vv$vlq#>Y<=(;MvF z7Yyp>_ufaNJ$LKymFK;!#VzDgjW5ryo=4sTz=(p>J#VYouvrn#0gtNcz5Ig8l) zd-^t8bA;#*B+N(?_1z-kW%_yi`enuqbDj%H(f&>hZAh`w2I3w9pA((eaixWC3KoK| zX>QSder3lQa|>rP+&Wt%R{QPWq4oiYyNG;g zfFcy#-@UiF72v$ycjcyikXgd@tEwnPbu!*zwDmZtW#<8)MN$pivPwJ{y=%+z9X9nv zLjtRVUDn#I@qmpyT9B>a=R7N;og4CUn0y6&qcEm4UA$q{;8_bErvWDX=K8Uq z&mjQ@EE-`9i?Ti#KKIi(tlAN;{h3tsiN$l*n)Je8Gif}^+s}y)tGeYpMkli=e5gCZ zc%vY^V!E_=0P}+?^px}a4uRfd3Rn<(c?AP+->#?S^i@pV=zu%F5hJ0U zWAj?|9O0fO`gra>^;F(&7EYMID6?)9^-7yw)e{{pnhC8FVbpct=(VWxk}Zo#wVr#> zEG40KNxp+B*RAfyxA_C)3RXqE=p^ak=3ZX2@>qZy+}`{;@Yi&Lt<}C^g^5Jt=>+d} zqlBiWsE3ojsD@<8FY|*2b3dJ~O!1Hd7SiQ$iqtbiocow(4{K0F@*_klIJg5)KPE0M zZl*cGQ0LJm=It}6G&8#NU|olpjqM%gMei$mAK3suoZYMUsTP62|B|Mu&x4X9rGA-h z7S$Q3o_VyxVTgEHF}sOxogxIfn2?Y#c&dkCDx~qSYToV+kw}P-uu1!ECwXPX?ALJW z7G7+VieXG2fRsat#9+>t0)2+dtfyX2Gs%DDX7y)+BZY?X{GhT|zn%|84`xAmBI zCJ{$~=C8TyGTKXsdH)u!FARb)(yxuIY;@f0Op#sFh-FEEeOuD~R4&1I5zRr+j(3f7 zlj;Hes56}=d~_MTc8LcV2#XF4TO5web7i`U$2+Ttr#O3Qve-g95u4DoL*(;+Wk12$ zq^S}KQJw}S%e70=2Rt8z9c8%f>Qavyv<^GI3PId{wF9^Wet9O9##h6m=eFT6Cyb}$ z8Nh^2X=`QU8N2n%cqUNHB%9!>4Vy8)>3qoZG`?+(XV2(Ljpjq0{I9U6RYAv59vn0LbAm$W_|7le#tN!75V)#`E`>)F%+yqH}a5X`vT82naVeWw!HJwbaTnJBXTG+ujx-@7#X7x-PFhfa;R7~R4J-SC{-%Uyoy zmVfRA484g})9Upn?JJ&JGrxAAxAvUSJ9-b*p5L8;S@U8&+=hc8Owx4sFD;>)X59s$ z-)F05tvc$!2fOf3=Dv05MO7eCGXiFT2^Y4@xhlozJN(RSs`7<~-K5d87i{(Shx3ID=D8x-W zmreS$uqx7x(z0;roP|A2BG!|>>a4re;^-u0SN!zMAfkSY;jn@6rmVjsMs zrzxs#4i^S`Gq~-RLJ^YhY4rXfCSQBaZL+RSF&*k1y7-*>UY{eY3w3WPNDbx!S>Q!V z5g88+tU!4(VlC|bVS1NanI0FX)gOKbyDM|}4I^BcW}Nq`#p;i*Mz8kr4xTrg8#KpB zHC-@!k6k*BHc02rg6GD*^WO$| zfFE)DyBs!RyM(^H=+OGpRO!N)KBn&@=1ys$?*{ohdBN9J0i9ltw=s+?{z4c?$amh6 zl)a(hU{VY&hSvre3f*LnLIf@e*)CSmOYA~LfHH3qKwD>s02GUNkGbZT=yZKHzU&_P zl)^bH84SEY%9aavhIAM8DSX2lhU{DY_p52&4??+yhn!Q;2IkAUbtK7`J(@jw0eI!~{>pQfD$RwLYOaPLtqxtuU zSx(9iN!7|?9iHr_Mj5x@Qn9~F)ggMR)nG65olaY$3Z=-GmaFgbjxmaEHeunp%~~8R zeP}4_`ri(Z&2jzw%fHs$UE}s#Mm9Vw9gP-Mr6plE^i{D|zz`%x`^tIpZX@N&XcdO9 zNQ`|AghwFvUwJFz4(tERC|)R8E)-f17Qe)ORpRGvK5x(p2K#;g9xm$KZdMmN%FVHg zI8ls=|B16e9;!dA6>}sq(ii~2oct+@Cr@Xwg(q1g8Poa!@ zV--cVDc+J5g>heZMD`N`>9a0s!E(K~2VM4s-1_}7=QL?jP52%7rw_K@PlUQ{I-dO| zRX0$htl*NP71`zASYR$+n!Kpbfzd-0YPj8fZ+o}X7VPk)Xee&cr0^)!W z#Ni*)N!Tkld7UV!U;b}(WZbg1Wd0oMYNDQTC~r(KN5Wp7wo*U2opmZmB9H7hxFisY zYj75k`eW%bWmsJZSc|!e!4aHwZ^VmP-P-V-WAd>6;`R$~{sL7U^Ep>;jaxC;Fl^Xj z)vRt*lI3{1>5E-W^O=;YLwC|184a)_kLRZPlcLUhurv?IrX&zm+f3FT&0=Ozz@Uvg z@yqw8|A8ybFMQKJ9f|PFd2rssSM*;-0=p^rHoRY=e-X6lOc|5;Nl~jb4hBn%ftjf` z%+_d`?AOni_;|Cov^1Rfrj=6E4~_t0y+{V<27&7#{SLG=Lw%x#J{d^OOD6RJSjw5?=8F9Olgffgr6R3sqECo=S(L` z%c|yhN5U_SMC&ppqSH>3jXj&CE&Bzfe{CW8Ghut;FsgN~^=dz!<@)~oAM?M)m;5S1 zOG<1M!UgG%-HofiW4gW(hp&%%V&3gFft-rZtby5ETkX5=gzdd^dCf=d>C3Xx>6hAw(2HX zPg?T()aH|-;e=NgiIl`D!YVXxmMC3}Gqt~N_l3pBRn>3y;_K8yGaBt+^Xr&nCOQ7I 
zh=bA@mP2@9^<5e!-uF;x1YQo@ndlrD?;0+dqjO-g7q-|+_`2l^hy%UVo@aRaAwW?@ zxGzlPS8;eU8Xi*WgD>ZzvR3pYYB9cAHM}>nNWvuBJ}!gDe^7Pd(Y47;LBT*<$)9jm z`J?^AF}^ao(zj7Ln)8EH5d{tWRkKbpZ`&ZnKuD`mA6hXP_z+UfC!THQU94n;R z@A;Duzdrb;+zWfssG4$QX4}sacEbxND!&^CDp>)CX)GaWQOzR^ot*Q<$O5m({yq5* zWLBqC4979CkEMjBBu-HG#+2#r)Yh~$`=gkg9oWawCHvF|R}P{I?-t~}$NTJsos9%f zUzM-B3g5uX=OW1aIEtwkBqA%D)5K3EuL_=mWvC@ec=(~7l+6TEtK$~8Z}bk}l-D-e z+JY)_y>I;(f9ZRjhADYwBXt`1P)rQDbw4X%AJJ73n%Me1Pf zdl0cK2y&rdsjTeIuTBQf^#$b}=t#TWzCX#}%+YW&A3ISj=;7eu2F>qEZXA$*c)0mk z$H102RWiK|?B{C8y7oJ?1-+eY-`A$4+Njxdu2mm_yyiSUfq!D>niv}HW*j!lO}M%) zZK>>YG3~*X%63MeFe**aR#C@bvK-ew);2uJ@&wuWF}^hP_*Ikz^jQQZu? zsCbw2N9Z{tX=hQi#X)Oshx~`o8zN7Di>~qq*g(|$$WJI!*3px~&u?1|?(SyPQ@`D_ zy}+_@)N$ah{(0BvAXSyW-hi!oPm;${}7M zCD$ko_!)*#@9H1V2ZK|4hNr3h$KZ@k9G%Sfw;AWAIo?zf#I>JX8VXkmg|hiwGb65M z9mSzCEWi5jHxdZ4dc2*giBM`aR#xMB4ft?Fn=5%5dCj9P5=U#bx*P(;ft z@MeoxtrlqCrq}DMKy?pAI;|yH5E^Zn+>9U4UzwP$1;2WE-#88Jq-JqFk$evHPW;7F z!WqIQ2*?E;!gCvXa=8h=s#vzhia7XKbKxw0 zGO_xxGneJq$s-*}kJQ26$b*iFHi+ zN<=Hl>AL^n{V2YsW9~&umFDyM?r(e{O%>x0R)BEB#UASdE3t>MnXa5^Yzsu*wV zd1Ja3Gvlhz_=2gAU#lKYj-XYt{3H=!_hRvXL$yCQ*=>o3dAsUma-MDw)!a}<3LAHF zA1|iQ&%lj|XQ}-Bi%l4>#qyoTdln*^987hiS27zi|9CMnL>v2A;->Ryuolig9*@MZ zbqz*Qp$RU4anza8Bkc=Tn+4e7|7pe{D}&<6Kg?`iJh&sBN~1wJHnLq^7;s-soG-C* znI5Epzw^xdj;s_G7Dhn@z2Gkl`UE`h|3l{L*Fc2=^%Cz?Y9{6?3*V+AEC8*ob ztKxoPc_S#c{s<$xLiowh?yH0HFPYomQNyF+x59W}#ee{i(>I_!R|f|z_+FA9~q{3-3xlz;Ki^z z%{IBiYSw-XDr+$ukbzM^b_Sz-P9Ob#URp#rkO4}k6Q^-fQ2i$&cWSak)ndEdj~AC z1B}wzU^La{+ItGj>F!+Dh#d{D#QxXC-6#2!|C~`g3==>EY_KA6w3(jkqhV7fBbBc| z+rmAq&IqYrJQfE0NdQ>ahE2X~%iDA2yKm7PoMkW@>%|3|yi;YVg7zy{H}CQXUN_Gb zEYEa(M<3)nN7Xh&bP!BRRyMXPBTCc<%eBXdtZ9IFU2oom;`H@yemw_fyxf`HJyY^W z_GN~VtDf;vi>;W$7Q^BpRozkQ_^!CP-$O*S6qjRuN=FWWXlFCI^PKaRik@3Ooo;-i zQ?27Ts8#cQ!2kjg`z>wRuq1eP#G!mtO!2zolL`=^mN(Pm<&cC1%JN9`2&rDjLKGyI zD*G>aS6~(gi@z-#T#)yQ70G-1O0cjfI6ug|)g)?c?T;RZCGZ=6WbbyZc7FesYhCF) zSOv-WwWSR%tw0Q;Pf$Qc2?Amzs#U%`SyoCX3|HMwIV%094~^V29M-TOT0caJXVbVn zNHH$W<_8%RNcwv}|KB?D2*ZfuS_psO`}v!!4#XxLd=3Lah>Ue~J*b^iPs2BspBUz#L^ zzZ9F;VD6C~A+n~Y_xb)4Scc7FwH3YxPsglIofU4Q^hFE&SGcX_0av`;?|c-{hL(ZO zjt$~A+;jbd-T4QHa^Yv46qR*X-zKiMJ(ZiE#UY2w*CXk?#{V=c|7VH~Y7#OGOq}~u zjlQl?`H!y|+m!F-c;Kr>^uYSf2f3Je zX#GTPX9S9Nf86otU+U4ho%Hj=DA7YUw(BsUw~4mKPypm--0pqg6je?=wdS_ql* zuSr^fvd#e0Ja@5A1YomX5#KqNz8dSGyXpjz4(g5!+tjK3wZ8Cthu-BvO)+lfb~8impBv*~W(Xqd=eG5vS+z554ds%iH`Og>(?>18##3mn zj>Z<6e7g1KoX>mHBx|B4n7h8W;%WN>PD$6Plap7b=E9AO<5(HCHh^IOdFZ&z*J zmhhvfw$$`X%h^*O+%W;7z@zfDlZ7khD^DA-F=c~}Pj)HBT!74YSkx(;?wVh>V}7}C zObb%2Ga|yPZ!BJHS{7=!^?xJom(=(9u z*q{w+J4YN|^ztUV3@R-Rz}X@X314@Ruvp@QxnXgo#c*Rp-*6>D$HCM>%#Ygnb#r3| z!%;DBcQ`5!-PRa1gqHI}jL`UXIqZ3F?S4b!aIiZ&tYm`ofX&0?hR{8~`GpRSS0%|; zbGCq=rM9LTT>HAz43q+LeyR#G=37IQk`6Ea0_#s!BqDI->P51=sThP?;!rvq%oAqa zK#W=&dP+hFK6`?cylqMQ*qBuKBzn1XR?ZnYkw2RMGmfMQwEEF{V@~XZ7#N=XndKG` zWP%{lX_Bj=Z!!z3241Ru+7qO^$6nY!E5R#1VA%3;;giQi4{;`jN%pAp;^ z=!=AZd-{sQPwQR(J-f&djNdt>(S{{zrT}D5UvJn3e5{}Fmu(J}qqxm)!ibxWFX@Ce?WE%A;b%hsBAji0e#l z<$66}leU!WbM=g*kz_@~IT>VB;lR~GuC?d)XG?hCi716yXv?={MoLKWOn*|F!JnJI z0706V2x1|OVzBgAb$vC=S|{Ho{8C$T+-aXo;1 z>S1nKUW%cJwhkGTtGE0BXXkE;`Y^i+Nq6;R2RQ0Iip3UVAI0dMs_MO!EU*SE1QkNs zbYwq}ZNs1nYh7zD!+iRV@9gowB0r);x`9$OD^Nwi#0u{YPwBH31A=os607vRZyPw) zdo~BP2fM6=0dZagQoi}|o5g8UwrpOt1&_PB%rbeGk}`nuTHe7monaC!aVIsMj0>kH z==rHQHpVUtr!HV`=x(kaC#;z%9jzgdc6+jBt|IHi6+(SHBQ%$_A+v3WT1K5}NN=!w zr=6Eu-{Rzx%U8?q47@oeaRrwyhe2onVL=GPe!&~m7{A{V9X1fEu1yA8{B;RA$JYlG z$hi41Wed-T^*Q5(>ds!-yXR6J&C`kyeh#fAw<<*)5-wKab}G?YCGc2+QXJ@Ms(sJps14?++$b<<|PTsV9 zB~=ZHq1|q)g_Z}Y#?CDlwM%wgVMU(?%O8YnwjnZhQ>QET4OnMCyp2llkJ|1`0!LLf 
za8kVF+RUaN=lEVHd6R-*S#zA-nM&!i_wqd0>_4oxLLu}eYYeg*Y4A_Ss$~<-`ob~e zEVDrE%)k4oxYlXLr}cvs8G1q0KJNUnDQz*$jlkxWdO+j5lQ#VCt$}zFH$A?dL0H&N z=@YfI5spA0YI2NDqkW=r9^BA0Rj$Df@fK;@HEo;xM9_b1_dY>f0oX67;OhQPt$lz@ ze?_#=pJcs&g+c1KCQ3w^g`8e|LE#!PkEZ0*RvjGI`o-=Ndg;#9uTGVQl4=Kq62RTb zEcE?X-@*JhX`^KSS6f#e4rSZ+Zxl}t@=kVHvSp1dW6KtUCR>z!tER{@_95G}JtWF9 zLe`MMSdwipL}e=s8pbx3#~vCIrYysE^Ss~p9^dhO|K7)SU-!9P=XG57dH#MEd)0?` z;bAnp-rQfB2n)UFlcu&m+Z)Uj!m5%uz}QHrNSF;|0qcQt|K4`iPk2>aFP7*vh&bH6 z9b~;QGE%A2$Cj#rj1G_o}+Oo{E~ zOlEI6WS;DFg@NeVob@^SwFKN53Rr!~Xe1;g57Gi=hjIid+_1cA0#5*0)odYgl|@C* zv3)RoA*{G(+I8SwzTx4)vyKIu3%mQ*^f*UZXI^K%hK54scHxF9^zU`2{azTf*f9}4& z4B3k~IZO=>8+C3-_fCK>EMV`(c1@NPe|pe01KKBGuqnO@`$xY^TGxg33m*||{pvdt z(Rs=iJNrkfxIUu52O_kOEyeZV% zoRoc>_3qESF>2l$+;`tB^K?InRQP9rs%pmxf zCs~20t!M~KV^H|}7NWO__&zUof^mZJ2Uwt(O*>T_ZC`Np#3S|!ZkxKlOQoa?zfJT# z8l=vA30MYCcn!X=h(7exB|~V5sn!n8NN|wcnWvD$H~HozIbxD+h1B?UX^v$`O*NbO zz-%c*lilNK$HHlJh_e6~nqtH>1FO1tuol4*A*$I@QCiCnRfvXyEwrouKA9R9EWWx9 zQvS^mBs^-OOzTwJ6uw!uJ-0`DAqV!oG94aGv!=P}x2?wg!K!9kRrU$T@1Fk;iO0E7 z1sq51tj7BnN=>jrkslecQ}rlA8nC$r!;+_$VO z@r8{h-0_q@_A+Q#zu=k1u?2H1T2o;k2wpKRrlXHui2^|j5eHCmbYZ+x@@HE?JJ^nX(F(%Yox|DX*@sba z*JgA#*VdrtAc6$@fsHjA64W?DKn$Mlt}M!cF2B0rZ>tiNni3uDZ)^!N4U*!Tgbalu zbr`u=RLeq?JNF4+g0vXPov*YYUzl2$&+{RMg=qkYD5~J%yK^aC7XFdxewGW}8%ZW; z-x+LU}p{7opy^QIAei^UDn^X`s7WJLyA?{Gf@ANyCS zZx~5unDqj%ZUj$ zn>uMv6g*0|M&>cRafGYL!)&gHpaA z2r3o3tRf#Af%jW17j*%Fg1Ck=B(X9O1ck{$9G~u z#Z`m|nRTm8Iu@>q40^FuyDvLZ%gu@7Z;2x;jUCr8TN6RPee2mNzgQVH)ur}zTc@Vh z>5Eg-bfG-Ry%qpy+^7FQBkkMnMn4Ss8p!|zHfn^puG*3!8K?z2jKI9ImU7A2^b2qy zrV^n{ML=IcSO-hd`^a3T-*o(Xqpk^N+-p->%T{jXy+O{)V@Ct0OKLPzx^B!lxV*h zf98QQ)FR=LB~EEIO*%;|L|y>Ue|Gd`B!6q55wG!Kju!JZr|3*YO5&)lSHuMEy-N0o z$6f>VGR2aIpA*oE7D(&I#`A9BvK-b=&f9FIR*GZr@NxCYQ}`wc9r5K`@Ks42_lT&E z0StMZCAT!kaGbz4wDq&1N&7>Iutk%ZC=9ds?Gn^lJO+Pd*EG3`Ay3-NURR{~fk6*% zXmJ9=kS)J=i>;}a((?TV)LCGN0s38Foue6xfHBTVPCaw5IKgrr?m z0D>7oaS-c5gK=jgj*MOQ&6d*%$zd9cJQitD97$!N!t{bkU@bL=?b{|E$JJqpv$ceY+j)!i38Th3^7ER`l|3l+8SvA+;> zvuplh4^14OI4)ctwa&IgZ=>C!upmXGmCpmge?rT3HW!I0R8ahJx!v6j2%I9JlQlEZ zZ_?iSX95e517l~pMlc}S!Pu_MDvE-u0<@fggPfEhNe*vFs)1CAgZ&6DN@e-Lxi@(4 zQS+Y6dXeGo)UuME55YX{(+__Wi0M>2$w{q_#N4>pbd`h}B{5^4%B0#6T2<_*I`KY5 z66q&&AS))^uVH+5Y$~^Lrd(suTE{%&sp_R5nVJ}A6&iln>V}`92d_v+|FFjuHEQsh zz@(V>o)oSdx1)Fq#?g+Xo!H-2F+m)#_S*QHs|7NfNv}+~p?8I?35~tR>~m;bZc(q{ zZ3UZDltN7l`7bIKpYB3;CGkS5K63-Ii2}@dgK@slDl+X|A&UgH`h78KPnOc;R6HrR zjH0Yx33npBjr|>!{LfMYtfhB7yU=j@^$A@u&!E*kQF!QrU;$*Ep-l6CKKvypV?m&k zeym8lbE)j0vsN{jT>xOrKTH$=9-grw>{QbYDP2Ay)sudEb2ycS)NSSSHttRRv;fKz z{A;ruDMdzjc^vE3a zaUv;7u*2FDuP@1bq7m)%PTh;BDm2F=V9jj=q2E#eg8@@GJ>$=5RNgajd{NcH|JSq`q zxKL|N^2T#`&OQtDgYByY6j|JZvA;pvuxyuMK#zX+=Kz3f1*NS&PXfSzw5l5zrIJVc t;{Qs0|705Ani!bJ!0~_8fD$4qAPu7+-67o|q9Vu;5+l;x9fuGcx|D8^7-A5Vh5?aIMWjPg zni&KJ8Q?p-@4dh8{-?t}XYaH3+Ru8{TF?5FNsP)QPIM#^5fDvsxp(dUT~*>wG0q`? z4=byI>H6iRSB_{(vC~Y6=FEdOQZ)meNf_PHMsYoemdL+ z)t1zd=e}lh;VvN$Ut&}W3gAtnL^Qeal%UJT>FQJ(+_@u%)i^^ zZ0{vkyEnuh(p#U3d%xprS6yuZI&6tzs-m6jaR! 
zXp0^)>O*Q8);L5G=G7PX)ZLZROSt!wR|UOd%vJo z;t~k{Rlk`2`elZB#HrWl_^h>KFpYDK93JyqnuYb15b-=cg7B5(<(;r^gX=p?$#BXF zmxr_UanqE}2SM<&=iUnb069@c&fCrs^ zCnnJTHWd{(v`bwnZTVTX89CyVm@CkgC7#f|fhx6H`S`dwtPGu|$77mko(i|{>RZ-= zE&Anes1`p{Wmx9zA;}N=yrTC->LMr$5y*&NxqF;mh%#0QhhJByNh(Vr(-UiPBu8-yJSvX%;*d^!>iv$UHNC`94|ZQ_?Zz~M{gim@;^3i?=f6@- ze#5X?)G=0N-C!a4LoY{`GST}{*bi^2xo`MBFb+`L5dC*&6{e80%q`mBK-h9pE7Dq<*ES!>_|;Wf3if zhfo_qb_^y6sLP2LNamzR4o7@X!#3W=>_ZRF3$#>mg`TpTJ%M)jZXKPB{f#&@5qx$x zB(2bCaKd&>n}llgWW^}SH1MS4FzVYw(@p(%f)%bF5w+(*V%cu@ds#(2t$+BY6+Y5+ z3Gfpa+?(nAkp{G3Ys`GgHvhaMYXHyw>q*ep*APng{zC?0XLy}&R$Q6OY5)ZbMf-K0 zH!o{rMnx(g%U}MvY|3lc2h96cZlg4(E)FPdtMkfGY-q@fR}xif;lUj$fvjrER3G7ya4Fy*MnTxcbq-c^wW7CDc@3{ z%-yedRep33%dVMvUQ@W;7u$gQ$9Is3z{}O#!LfxPT5sS;VQmm!ngb5%C5xafA%|i&2qMt$GY0JA2HYvRKV9lhm??80-KK zxGjPEzzFZ3{b`lzd>QoA#nuroF2O~?ZK`{+fQe_0hj-)0+3LpJghhn0av0aQin0f= zwg=N^Loh?`2QXR}O=Cp_9lPw!m!7HS<1aQ+dbTD`z(aTIcz%*=XT# zQ(dMQUmb}`<6#v3qI8Y7wa3n+I}Pa#c^HqyiFOcUCKaxk$>biU6EQlu{j?2Wj5Dr| zz#)5_R+0b_GeeYXVz7vup-F8jR1$rVX^O5=- zQ;1}U@N5vjy|Z8Nd(P{#b9>m&FpeQ4V$s8A#kMqCcaZk|_xTKSGq2qODwcOZ4WMu_ z$B+tlolNXKX>h)OIOT-MD_n0&dr6xh8s%;Tf`%na+lNSxNlN;(UxAAs_2O&y#(r0i zI6dVa6f{Z{;C-0J{8(T0;D_^QukB*4&LQvY_zf-w;=f<^AM9+iB*U%NoOL!re~a>I zr4OQBZH+d2J*K6!^!|aLs4#2-tnbV0$-eu@I{#iaZFuK_I6Aj}`0`urp+cT)?kMUv z@rSj^-*w9s-(=BsIVQLAb-pd6ra}JTj{U{%LEe_R2$uWGs&4qSD;~*Q!bMg-wNxeP zS&1W`*mu5>btA&SyuSGhjdzkRv?c@Rn3t`; z3Y#ugG5E?K_(nu00d9PMbRfM*X}j8~Ute-bz*TV*cWi2#s1~TO8MW!VBXOYHkG&nV zD}&*sQ_O}Z!(aGku1{KS&df_&e!L6}{vzjjj=(DyxD_!r$ZTDMirQ7;c!ZV8h8zqv z9a^2WdK>nkJJY+z+TrYgL9{bI?Z^DO|0glB8~!p%*R)MWjmE~6X4iSI`%8m{XYS4t zr~E3ARlBQ@Uky{D@M?s4@e*U+#;HJF=HSEoO335;odgiN3c>%BH+|tV3ktx_RijLn zOJ3WY*%}6EV13^+vdzstpZ(>4kF6aw zUBz1Q`N^#1y8+a|KvS9T{X=p@Z4jS8pCDx|ILn`Z#~Vtm572X+1l6R%Oh2@2PsQE9 z?SPppo^|D213I$AU0g54s9$jWaeM$?xAjk6d-@MD`*aNYh~%DlU8zSI)lz|4>4jHiKXG~wS=^5@8@ zCf!!2z5sT34g&E~`E^ve7=}G>X1;l z$xfj%;6P5gwtQiN7X$I=Hb$1mKCKpgL=`dy)E_moSGcvDGmRrQlB&&=tLwP^;I0Nm z_3z&blj=9JOf1AxD5ZDhnMSA+faVp_Sk<~5m&2Y1uTDI$8MXO-Hs@3)bR9(d0?Z!O zxc}uV%lG)(3K7WV(egK#)9vqUS+U`nh#w$hC^BT6{G+r)%xfkxsxY%+?mKucfaJtqMA;esl8W31@WKSR+TVA$;md zbvRkY@_G)lMyUl1!;j zpNnO0G;exjnC=AsrTy_`u+It`BMn3|sx156uWpygZH?hd`-s^)oKHG6Du((>|3&_N zebYwG)#8box!{0!g~9h()*9cV-To>O*Y3A$hz**`qcz)Lg0V0wSMUVOOuzhcYHvH5 z=?r2&RLJ^11qhVR=xxv`*HSsn{@PYlv^*HKI~_C=F7Y-FW|rq|x8}kG-U0-T`h#5t zwQ%KP)727R+npt2Mi0L~D*GX>MNLhz%xa4Xsx{qRmHe@}KixIr++bjtK3LmozDn_} z|K6Qr$w`F{7w^v)oX^D_SA!VPwRD_l?D_Z=f7ANi-EezwfVc1F-sf;4hv7HD!($X#v6$CgF8t5cf;s9m&hJ`%8BEfn z*HUPX;!5L~o9(Got6mDQJ8!bMXibH`8;wTYeyJ}UV+wDI&G(^Ns093+F|D=JpB-C= zjsnY-@7_N0D$;qps3`CMo(q6+fBpKhI)9~HP+6N4k+^H?8D#1vyk7lip8qk#WrWaP zyZ!wsC9m)Acn^(AIg%n&B=EJ{Cux)Z9tLMf{^jW?#LETmV6WYJo%(l1*7KtCMVA;G z=*PTE-Bk6()kuIv*P7S_6tpoxrs~dN&j@n zYdsEnzPD`wbA@@iRLIAte6J+TUM0fiLb4{(2X8KsyxR!qej8D0qS^b>M@3r$o9U?0 zD*HUyTqhYWlx}{y8`sm!aN+x_k)d6+s}W0n^FCVn+u=%^bt%wm8k^Ar?#f|3xdL$q z)m~HR-m&_L$_~|qctzO8*^qH#`Z+$dLl}$>u2!OmIQg3y zQ_LIbtW6zJ*1X2A_q%_TlJ*rCFa0|)Q#Rh{3t@0dG7Q;YaQ}`7dSL#y(DAo0s&KnO zp?C}CLWHq<1nhek&V?9LxAa6f3>wJGuSVC_x(moi!fD>=KCTavSxQ|){A;#5+xpP2 zdpqVinwpww0`U{?55jPu)ossm;+T|K)}HkHK6qjQ^5h7muEd^}5#~y&pGV%quNvRa zbwDz=OT7+h-nQlRmmHl=_nhLld;^&oNZed9ZvXXc1ZG{j8YZKZB`c)J?9LjDZDUxE zf6(v+?sM$)fA%=c6z%=o0|Wi=&P?FM?|lyXjWm+!&NvkPkf5u@N^0e!bp)2z6qbPe z0V2;q;Fi26c~utbu|Ua0YJY}^e&C0)K+82`WVUqANc8Hss{toVQWAu_ zk@PDD(%#G)o}U|Pz<9*GKicBvG%qLgK9<}>omn$DD$l!t@z9SY;#$Cp980*P(nwbM z>u!UnzEOXsUCj3@a^`p|*vrudZe-(EElySvh?5zzA|JzcQ4FHM;=^r>0Hx^w;f z*{278*C&70TCTb6JU5_d()G+rIII4{I~U1KG3Qvo^bhF<>x1Ks13V$(cvlmvwJfokMyDHh{a7q? 
z{Ol#q(rC5v4;tU5;Jb->QcafnC9^;Mxz;j9b3^m{KWyhdl^LCHNsZWVdyAb}1DU?3 z1BstJc!W{z#1`L|p_P^1e(vJP>GNyz^qK9r6K6t|ySk>6kSYI1JxtY~@U3@gZJBrZ zG46MP@KfSMhef6OAme0sL`+Tn-i*ZRxgO1MllZ?#S(f-*_$Jc->t1I}_v8CNK1^s* zF*U+Y$0xL?M^*5hoqG{}%fq>&h3KZ}`Gl$%AJ$M40~5Sou!$m#bvkxvxU`oM-a^5c z3lA|~5W=I@_hT0T6+o40tYhBSM1bF!i5 z2EF>Q_f9{-`cDhl`fKn`Up-GPj>NXv9z`53{O#5q&YgiB>9-mMvsfL!;Bb5rKl z9a4pdA(9Tx$?(_o`V`>CC_Xd^q?4k-|V~&?T#98n!M&W!`UcLFv@7w8u-xd$r&H6Kg zk*iM$*8}u3bu;hRuls0R-*4N1q|>fx|F*q;)iY&5bRlr|%V941B1EBdAZXo_TK~Ox z55D7_D48-!kpC(bVt35P|b>fJ7Y*7I_U$s1ltmKXpI-a`xRA z@h?CpE;-j*Gi|(i(>*Nr>5qz1ok!_#jBE5x2PA^7AZ8Q}Xb<*imHX|l6>UuZ+D`r` z3Btu=`@g@rthZt-aJQ4F2=lI)5ohNkN12uyPar-Jc~=xN*jsi(d?jU*{`L38bJ4XJ zea-|c(Uv~h_M5INpMt-%#nIRVMP5-XpFl?;Pb?A6CI?@hs!I=3i z6q0|a+9NCB-T7Jn-h3QRUa-n1l$YVt?n1(J7!anxK3G$v9qets)xE)Oyy_fhB`UM& z8)(|P6Epah!)8AF@1NDR-oR?VMPv2$Pds26``r)<-<~A1f6jHW!vkh6=paho5MqXX z`MiI1&d;z~u>v#hWc~wj8(do; z#*2?#$E&Kcz$O-B59h3*ek|rL4|l0ddp%;1w*8(d)h0{Fj!bHIxveH*qQqEULi&%T z+1{Qt&qe=K`ekYqDb-LYGKN_-wG{#s8&Tvhf`Jxt0!pG*Pb2rr;sJ1vzvChf$GD&^ z0q+6=U=Lt5rn-R`W49s3sgtbB!o9x$r)dlHhP0sRUP(o-(_jUNA!iPBpBnC~v|7?_ zhSB7EFvktO`YhYze)DX8$v>)LK4cS3jgopWSaX|g8CwCStgp$@%Z@%Y1&nr&9hlUL zsn@;?&WH7vi`-2Pl%B)S1os+YvTnwY4|qv%o=)7oO5w#@9<&UiI?Y=pzsHaDuJTrr`*q&JJH62W{JVcgo}^?8`}+ z#^H!tJF_aMgEy_}K~{AdAfywt#!S$q%CIe8ku#pLn*0ysEQot8%^&xE)FbY(wU zPwyZ$sBZdup)7%I3HP>~Reu?(=FvaQG|ne*4UQ!FptgOz7RK>;L(Dp*b<$+m$)lBJ*sHAqM->_2g%ivmW@81?N%g!4!O>1IKE+qv^q_k{n zdE9Cu;b$vqig6%qViexD zz&IFoFTJwbd~aN>th@FZ%w{DXtASJ??u36Z@5LBbq0`}q$2`N}yWVnn?jFb%vT60+ z@ySog&1SpXEBxZ6)>Q#u2iKJtMr88+`&n_yk8IxCX(@hx^$Xis@B_sCVdWr8XG@{V zVFF5mw!K0)>DNCFAj0@IBY5rpef7JK9jwD=*55P=jOz0V{il=LtPVp@A1h)imJ!y+6B9G8OOn^OIY|h`cxXL8OF3(GRcv z%nsxRPfWAN24ZU?YVH#mZE^A720sS$+m5m3t=0#J zJNsV`15>L4r^{yFr5APSSsR)M=d>NGF9Z){6fs+KhU}lINMw&3d2_C$R=+|=I0)*a z7``k`MKZ4ZY~6InZXXh09UCGj67vzc&$J=M5NbudC`g=1<;sU_CYXYBQ0;$i zyUD4vJLfK?M-8E5E6Za!vx4bWfBZWF!LVJ7yYIED$4+2kR?+x>HfjFw zuf!lq(>1@_kjloX4H&vR7 z79!~EX)y;I`gc5SOxhtWtf!XlhVKWV@Jp8L-R3Z|xgoVU z=nP9MZ5*LH^Rw`PL4R(8zTwO)iVM;~)TT3!v&_Y{N6%H`2{J=OL#UYX9M&RnUpl1I z!8?V=b~@ZxnZxh`74taaB;VxY*EUMS0lywJFdIvlkqj)Q%3;it`6})-!3sv*O)vSW zOq9B&8<~uWFb{>^lKMJkZ{iqX;(@t@+y&` zh)ROoZ@{zhi&~_-!cluQ0P zhZB;0x5GAo4aYrd{_0-KNIS4Li1UpzkjSd9np`;z50J3GSE!>1M&gNKy=wP) z9OfsG&w2%(rJ8?qs3g{yZjAtQ^)irWStux2bWshGyB3g%dCA>yt3p}eP&XUyrmjj* z?%SVb;_~nb)owyywC7Q>@KNPiFu;>*B{ieAt|TB`^tzA1lp(VEEqOS#gO5@0GkE>i zHmQ7-`P|AJ1%vh$L^rFcEG9W(CjrOl) zuDyzN89_o{?s zj(5D6fZtrRw`p3h7zgxlzB5-Cw>FVCNU$}*1? 
z?x8U6P6K}obYu>wmowh6zGo3DtYV0Uwzo)HC6pdlt9B#U*>2%riBg zSVqrNfWtVi9tGcqrrhh;2hQ=l1wkPW{(%|w$Ot!Pza4IYpKd}K}Vw=KY+SO_qoZat{OnGFq5J~H$^OBASxwH>W~ zBiZ#-JJq}=om;XxR}dCt26Gat%lM`Gc2cmSxNY;}8_DjcqE~-^la3UIlLcz12J=Aa zo_&{!(*(Ca?FZhW<;XTZ-6~z!`YTf{7;5cD_n|aSy|&D=H=L0FNR}Qf6+yAb;o198 zYCm<=-06d1R;C{ASg9I75^c}TjBY597XAJo=OuSu=JeGumVTbSH?Vw5rZ?a=%qbpl zp-XasJcMYoE=^H(kU)ZcOXE-GdR8Y!Xx*m?eps!68sAKLImTg{m^@9%mn%LQ5e6J`OSo@wHC(t0Oe69F^}&b=!5X=ClVa^2_Z|(s!xhf zK;UoVgbx~#!2w`=xc<0x4K`?dzIFm0r)ZpEB)vtnxrx@ZvMtFFmsqT?sH;2vNMb3W z*Mr4^Q(0v<@M}CP-PO*2y4v~hDpwcl_aDB{cv{W;*BnICY2eoCQk&YTK<%~G*;IvwM1k~;Qx%;cqRz{R=ok3{l^M33P7_F z9VvJ5lM<9W%mGjHp2ZM_Rd-s38!dl&%(3;0FT{Ou;M?S`-uBr&bX0Ni;=iK+3QKTU z(#6)7LEL#l9?yU0P`p~nW))Yu^V>`?L1y7gpbGH~b!2eW%><>MMAz=AC#UL*?RP@gGCCj-)rh za6m7i02ED=WTUpq2-WopSy{LbMc+xHQ(L{WB2_AUM@`i>eS5s^;me(pbm)J!vdcH1 zJ(tj|2#F#JrZN!+B`%oImXMSX10tK~zKUKyGYigdj0sf*PG>GL^As;-|Hr1mkO^Ic zRKo8jgnTSw^8QcU^m2mlfXx-y@*Qy)QuVu>y+Al!p80~Si~c4ubnhj_9jW#0z$N1% z+4gHwu!A5Ae7dmOh9ApOol_P&(Uk?1^s&bXQlC~M@c!@NrL;^Ql(!&R8UZmnEHX=% z-ca`>gzSCjs3UHjss~FSOP^*i0UN`;TciQ)BKfIRB+dHdc!D#KkxunGmRqV^g*hL& zFFSPzb!Vv|d!(bWSl&o&sDE&-n}5o-F$-~oEWPu2mG(Bn6F93kz()15T5JxQ8G#$4 zDfEFJ6uzCvwhdzk?};KYBi?~jt{kq>NEX|)+T?SW%&PRTYdUw<4KET~)X!Q*7c^&fh1Xjk% zH*NfR2&L;5O?aYT4)o|=2ENYw9_!4B&zQF1qx^V5|8V>81Jg+@^dK7dJeAS^pML-ax&rM_j#)fIbuVK-UYvHw7tzl@ z#fjF98ZtaN7kxgk!8=WG0wQj#@VE~5$^SmiVG4#*Xhffj+Nu%tX(U3Arsdfk&Ho>e zdAtKSlM$^TPCkSRM)y68p3jZqgu-c)ys1yUKFx~It6F9t0<~3hfrQIAGu<5tW~V1| z=zLGnA?)@a$!U81AKi2ZcMlj~LCgmMOGs#gP2o8M4szi6iBTj;^&9CP;d4SW%4}{Z zO2D~-b1tYOAbK95c^BdUJA!o>ha{vfE&jITJ;I0G9ugWvB@35dy)g|L{BYqe-I7Mf zk2m!;h2>j^$bUe~n+A*9*^t&UYT)G3`YKq7zt#!I%&XCZHDWJ6~sxzvV&pR#X5Pc`TSbtcX zCQLB>t#=Sy^`Ba?f$DK|-#8RI6D6M@+`|-&z-J)vLL-IGsZT=@_$I{8ousBBrb`cM zqx(=^HC44$o330GO;ROdC%u-+yo8ds{izvQWARkny>13r)iwJNV4vuOu0g6NJwsh} zogjh0hj60f_R`A^vpMw{KxPLw2|pcid@v0unCRpXzGerpz}3>$&j(Mus(8`63*hR`)>UY^hv97 z;oC<4dWmE&?<^fPR|i%sYD% z6TKW_eA&|PTa6TD-A+RR(1RpgQ(Q*|bh#UaipyKPt#G{GA?RjM^N=tty02wra)Ds3 z0C^aaMnJ`k4C+ZwJ`P~Jn-m9iPe$-@`R)ZTjjCBnDP{|@zOghvg1AN^a8NAIT*>C8 zBtYyWIi5%ZFBv=P=vB4+eeh84hyb?xVnPutE)Rqn#A8_&A!dS5-sSyK!uDz;Q3ucK zcgSFl;JiEnIMH*N$64}%P*X+08j{J0JeP58*pUV%U>DMU4X=iuOK@wr_B@tKfe6=* ztS11-`1B^UE9~`Q4Fo6OdLgSJX)%w1D&#TJ-n`Q$Hp5)+3nNZc(aq?Su__Bqk~D0d z7*FTw9f*rkBtF2^G}v7WCT+blNo3t-c12`33y|oZI0xLfgAG;7DugU6mBcRL`ig&c zBwB+?z!MuWMBlNF+otKrTA6)oiaEfTw#A-0XW0GD)$U>bo+3n;X0U%^Kp;gcy zlyhR0&lu?`zSYq)3Dy%D%2qNP5P4w^frOUf17qsbC`6_7<1;Wy2|>!EkQFd5?gNI` z!ma^$&)-$O9w8{Cz4QBq>1&leNB(zAIx{SgsvDnbBW+umdI-Si6N!`=;lCD`bregbd9R& z0zqI1(TRN!rLSPkGZ;K`$2)U#Fj`RbEwcT(BeA^M!GQ@I#K@b7o4Sc5-6yfZk8rZ( zuifhkq&j9LfdF+Y$wT!4^X0#BUwjvPclTUH=my!$1m)l8K2`#i=tnT8Z*i~|R$M0X zxLf=D-XGc|UNuX7S&|iTD7F;iH$t0|@QDc^DBF@>qF&eN{>X~mn95~32K5~QFJVN+lWWuvGK3o z?ZBhu=rYjP7-Kvx5sXta;f(J-dA*->xn`!-d7B2wK)B!ajBV^v_LJE=UKt2uc?2S3iEZV^;Cnu5%@x}Cwn;LSW34GM1N-&$gE<8Wgy^@5BnPBmVO@iq#(Z^5D4!X0o*=X z2=~OMU5$Pc*Rr7nGuAt@!A#0^9Nu*tV3MJkDjrD);i0y>x8DRtwXJ+zvBEE%lzTzu z@nlIm%m;cJMM0_>l{Ko3lB@%oLm4A}vy;}F@r2YDw)md!YN&!4=; z0H!hL&a`q`;^Mj%^>omftS3FGM5OB~kP{MaU|uB2Cc-~a4W?G`7O!DagrTp?5@b`f zUf&^=mnGQX&$PMNP-r?!-H!U2coMjzk{#UH8$JKPVkoJOqK-lo++Cmrr#o?J{ugsA zqL=I7J0{mb`g&Jli@b-pq|?+BrywF$g_Q>j*m$2$%T_2p?{UxxcZdv$45gP_2gtCL zES_&|i1EQSyJ~5Q$F1<)nx$CuVnFTGut-n3Fc_GW;7X2;l~=s_oAzy7P7XA+gn$Lv zy{W%(_x(rXkM(JP0aOA6dd8C-`?=?*)D7S7S*VNBpAv&K`Y9<$C3koC)*EAfCHUea zC>`Q)a`W@=3LIULV1W8e9W0uq3y+7+d&@rk;zhaR?DV94^z)Y&ZDrL6esK0r;P;i_ zDsj?JQGvtTcENTsRdF)?Yqj{vhW38r3oMvINCi(!E(&~0x51AF=7-^VPcTUpF3Xv~ zqZBR*zsa-F*K31Lsfohte_qpDKgAefyska>J&VddIKBhMP!_RL(3GVwdcd_`rattg 
zm)#CHVE=Lur1fC3CI+;EZb|m;G(@10>WtdzB&qL(pqeELfrnmrovUUy7+(DQiC*bx zLBF~RR5?SgtOof^Z*&r5tT)|;fQqf{6?k55{vmC*!{T54^%3}s{|xL20`Htf$=KVw zaO6L+F)8YOK%D+~Z*xhMRUvDy+HO|W;%U-2pZZrI@Vct)9ME3v-+rf~!0gkX zc&&>SMyjW4b|BhxUD>;N2@>~@zD5~#d9U#fu7QY|3rZK&Ckn+5fl(O!Nz@7W@(pdd zue+><1EYEV->O;Sgx?KbJy}pv3>pvM4BL%E2NOPydAv&yKcA0u(VdWNJAW_Q8?<+c zZy0XfUvo8?=UX^&-`V7!i|41Gmy2V8T8{D2_; zsV#{uy`jDlA`hbJMAV~`&Q%nH1J8KKnH+G0yPyK_h{Y46@qTU@Cxb=we{T8&=hgbd zk5Ls{{;$D9*>=Tm^ zyhw@X^f90eq9+JIf692<^=LSO#%^4h2(gMXcvI*H1uZC;BDxKL>$xQ3k{x;&v3vzS zZxs-ni zO9&{RHu(}fn7&XnR$yimE4=byH18`utdoLK>!wlUFsQ!BC~Wi_&Jb(+9Q0UGP3N(H zu-s9p$#eUBR?J-FS0_+?(VzgycQ_#@4O(1SV2KCXmPV)(Tv|>n?o9bx3_bPEhw=_s z83m41{akfNbx^j=IsWmmX~BQG40gUY^X334wy|gnyzgge0OnFHS!wyyFf6zpKl=rlK}HqMxza zJ?ol6%f%a7RC`YQaSrZ~?)&isuic}4Z?F(nygqqn$BDxfrVXb4!QKRj7{wol#T$z6 znU^vrDj){7A9*VX4tT9S@Wc(@Kd%r2(k#ZkU|R>r&SKtY_Q~0PP&H>X*(NMH_uG03 z)G*O}(UQ6dn~hvGKq7C3{2DfRakwzZU19nh6h4A=C-wo#vF6igYWfoM^z0%b^lPE6 z;D6KO8mn}T6fNL6fYIF>W$?0`iqw$V8ca;aVp~Ch#zUgB&>^FB3w3Ws@sI3t2&%AAMsog!?xP~hlK^h#5^$E`Ax^8&AxFQWQR=er$ z-HwwjyM0Y6Y@tmoyX7W?0F+l;3DEN9@twuKyq0RyLsqBab!+4~CN3L^6oVTE7h%qo zVtus1lDLo8$#!Niq^Ur*muPut=!2`37YbRvHVpnaFhJk8Q|vZ4akcnD|MoqiUqo3q z5Ad2DZWxp#kxwr=t4Q(UNfp>kP{3o_0Ea04oLmX(Idr8a;5V*TD!NWd^iBH{Z*YQ* zvplodHJ6>s)mBd_WHhKBwoe;(zkKDhz$GxIn!MP3sL2l)KCLOG$IMGTfRRv8s?a1% z@3+SaF*WqKBZ>1&Yb=Q$s4NN4+VaLoJs zhvB}t0abZnT&j36hIJEP_WUPPEu{3y<}Odt=?#>qgK5h zAam8_xg^4O_@@dJQP2z(APa32BA*X_c^X_><1?CwkJjZ5T%?Igg?}_h_5|_+q3m?j zaTp6)vvHGgtyv@AnTOPZS2}*;7;1%Q+a-hQa_zgF4xqp{3e@;sb&wJ-o%*iTk;*Kc z--WVt=2|0^>1559+TRsFLCGeAMtS~YdvNMTk zIspkv3A5lWHFk4XU&d{KhUAxR14oB7^uwb;RFPi$0Yj2`xmZ<}0IOTzS(hXz^5Q%t zuEt?_cr-=@6^n#MP@E^NkAJAFM`~Rs{8{}9?39_LvQAgOmebO=C=M3;Z2x_=KS6SV zTeBoGgst#=3d3*|$(9t!Mk4+%nS5^DGQ_UR0Duh*VZ{S|pxDni$Xwt?_E%TPSm^oB zJP*ds1qraXa*QnoXZK3Nbe*h)3*c83ntF@8HdLsTc_z8qvh3NRA`cCUuYv4evayj`}Fb z-33@9qiTaxu;g=2Xb_fSGvT&x)!D%sAQ{@7U=b5AV0im-Vw zRol3+=~P6LE%o`n>Fq+dGx9PD=fGPL>`fyjmctB!F!lG0t_4e_iR_nxn(i`gk2=p? zJ|K}c)9>~lvn=}UYm}RQNu560BdfGKguV4F?1b=RKG*?2C!a~&O%4>;^OOR zrDtA2J|0?2+P(hs`jx1PVeqq07(Tlc_)WsOnNUbmwZ5b|=>c9$C-Is8S8Rq5ZPA*+ z{G!PXMZaAD`vBcx^~51K+a9hRm_=ReC{e{IsD&NOdl^?K(6#Jl)^#{I-!ms#Topl2 zd*s!^POozp?ntIopOAKO5CD;u_h48)EsGR$YL5n$XtL|0j-0)EH1GsM2h~-jImL-K z39~ou%>BwiZEbR@$bK58kYn#214kpt5&OolxnzL4dn&*46GhI>zX1-2FD5-kS;t$! z%Vti@x1H+&SoFQ+^JOvzld(Dlb{A7%bZ>dcSN?A;9<1QU$Y?y8V%~@8>9*MOgLl>r zRC7;FuEqBsje}h~#k;49`ow@U|CiE6v-)*GlsTw;oUa;=S07le=mr%%LHU4!{L5xO zbjeiG`s63Nxqc8=Q!Ud=pEQof5e(Zr(WOqf4I>>uwSL7DCZCKj@}nVXQf>wBM{9n? 
zr4?`;Z3GE4l?`6X%c8|5_j3h@1WjD-H8xbstn{#3xXQ2?Yn=Nz_x!h|cN6#PHO4mD zrZ6>giV)B7f)~s$QYr!r{|S3Svr5jP&dB3;50_i5ZzB@k53ARmX~@$NTh7)KI0s&7 z)xQhngFU>BSf0I``;N*w?z^)oX_0cwA<5OINqn+XwU{3|S&5lwgvmQb2=D2%+VZ~< z_nf%!@u>X#(r46Y$HxRDW5-Jq*G!$jE~Z`3neaz)ZMp;Y3r9M-7(ExC2x&22SP|Ie zg_%|Uo%+)sliIN+p( zyMRWqmb}QrxoB*aadinO33l#zmUq_q*LoQ2LVv&I6ZBw{+P;1NHbS|u-T4Q(!A%lG zb67u(ERAZ=yx4*li}Q=}thK57qgPiV+8;E4Gf#cwUT z_O$12IzKP^O6UAGCOLNRPM*ihIND;~=iOrqjs{yr->OqLjr3UZy&5!FHi?4gfsGBt zV5MeX)7p*J+7QH(U;GC5!`2J{>S?kQ(Z^Hh%S)U6R-$4a)XAlvOGa(mr-+YqG}HlN znNt?M@lXjq+pss`@TWBmQ&?gpogyW*M9zEn2@BsusTaK=?hL!z(2+WARx*dZxM&ph zvGGo;Z7l1Pz3fNkBi`Ds;~+6n2DF>3{R9IC(~MhC28L+sRvp#2?-wM?at^ z@Bh@B2e4i5kdQeTv=b3A4JNojQIRhG|MB#dQB{3k*OJnR64D?c-Q6MGUBV@#TT1ef zBCT{IUDBPGkS^)&?(Pe`=l37uc|THvbM{_)tvTnKYu^AErvtq4WO~?KRKnvYcC*c}9hMMiRLM#%Z&2@>-8kVTH2V))Vzll<++AZ80Ct8}T3jUN8iEJ~`2C8*H z{Wg~p&HZ%^J)zI>_Yyk8EaM3s0z)O9DMY9Am$$6$^!Bysrc<~dmq~|&QtGzH(II@9 z2F=*nbXyr;gIWyf?Bh~t;s(h55V#{g=6bg#qr6s>`0d?lm8$Fv?F_!WfK%nhq{;o= zeR=ccs>7<>eAQW{v2T#KyeDloREg68F#hiw4%6$s^6)M%;mhjeNLGtaKkncS=re9E zjHxE-QJKC9&>kC8=@yt};5axtzGJkTcJZQ&r0_@obK|ebP@LBcG1%0RDy~_c&4hS4 zIC`{zu9|d+v=X7jrTbb^iM3s(3ukIUt~Uv^Bo;c`y zb$=wL_ISi&A)+y{owF6Hw=u+0l1jh3b;%ch{4TM19Va5p913Sfwq5f$=C-tNUo%-Q zl5L|*)Glcs=rWEU1qM!yqXa-l^bD)Vi(k;oe#VbaOjO=)HBf^QqOz&Fl1poF%YSsk z%oZd3RiLk}Oq}K<^b#qmaec8$y2_T#=k}#O#6CE~3!R=FUeq`=wV&MCvdgwak0)x7 zz(Ix%?VjqCQd)!(H?Ou>kAtud-B4YQCSmGkzwfNn=jPMzxbiJs!vrA5;TI$me3jZG zEEvNHZ!%hboL7l*9!kjdWJP{`2|Nu=&2i%i6$#zEsW_0=RE_|>_^#5id95yRNgEv7 zUy&SnK%P^f^iy;rI(SyI=7@dx0S!(1DJpon(EP0~2cc*ou^_EpTMKVGX~Tj3 zI|2=2qO!9cX@`Bb5x+f?g@!+LM0c1u3F}n7Fs{~qs@BfSNT5epo8u`pc)H5|)6WU% zA^{6Y8u!Xy9kBx_THQ9*uh#$kz+2XvM_XQ8cc#!^b2QKeM(T7YdfzR>EBzNpGNRuh zuecM^1kV`TaSBUe@NXu1t;%Ug=(4>AH0`LiW`5{1U;jzx!Jkf&uy9z>CamwFP##3| zOAzrE_EKioHp`Dow(Vnesy^ecP35qi#*RAI9x8579Ru1o{%S&mV!$Jq=TfAgg2g7is-xJv}Er!bl{b5N^8UznVCst zF}jFs;FA*rGWC*G3x9v}(Hw5UqqHyCS^XYK6x9RNlTAhXc%^$$wYM7CC3{gy$bCi{ zlx0K$gQwCJ%}e=|2dGR{SJj-VCHb=>@F8m2;joF+&U7h zFdo8~#2sJ%CaKin3FGramC6Nw&;ZMZ4Gg`zL&ECzipl(oJt^S+dGLpsyCJ(Ryq9t< zr@;>%zkG{0Uf7EoyN4-w1mnN}WBD_xsMZ;vB){*~V1Se^fV#+`=e+s@E2Ln8qbl!j zy<%{Q2HQnuut`mT6!7tMidc@G=m&s<+9K_rmem5Mg>plBi+ETF8CtbqTG_b-=f80l z>(gC57da`^4Ne^B-RsJlYP2lQrzZu33Y`m|5@AJ@IaG^CnUNM73p;f^%5tB%`*!36 zph@7sp$ch?7=?Y%977GmXvD|jyx4=H@-uYVwIo|Qd17cVD`{yyR`$VG4A45;W@tPv ziEAW8SW~{_)*MQ@Is33bYg#{57bW4631{I;W1&E5jpY4Gu($F~I&j0b0FGLyA{zG9 z{q2H0`@+t)z^LelhiBKW3O>mrYN#I>7*&S4;G$dYd!p{!>@{v9kA7y$wz_I@r|m3> z-9w@mr_&sTVwN@~&V}dzmkV4M%1t=TEgxS=+^XJpfkn*QxYun2WmzV#YGIjUDUnt= zZ3sMOh5TQdqi@(l5NJ?KM>4X9ms+th)L-G1gqQFq1sV)9$fAga;A1x8;&JDOY7kg1 zj?M(xXIl{^9xJ~3(WJ;5n(klDQW^c&7bWktO`V{b-rU9dD)=|>;#PyI-6M@CB__5F zTg-*QCz(`;Y(x`l!1gtViDIVJvwre}6%p>UFzhE53|_IOcqupJy$xl}z3~O*kRik?0Acd&3*zX<=7eaKuyCQXhIt#v~ zecnBnO-af41dvP6VxO~%J9*jlD5&In%J6o=y`Iij*=yOkEZW`4#>nFn3ZEN(xF|yQ zof@JtXNkF)i4874P>Bb#W{}SSebe38aOq{CIU)$L-@nVz)FHeL z4X~z?z#;ewR;gHHLG)9y!HV|m)_LciGMF=sK3B)zL zV4L7zJLG99-6k(Etd`u=h?TeTP_=}?lR$)XU?sY{T<~)9(GX?l!g*D>k=Oyc=A`hZ zcU3`3C|XF1&pqep^)V&}f|eV#O{iGc+(6ki3pDsmv_?pQ#`B$(myrl2mFmL&rR4fl5*xj=O~(Tp(Z?spL(&Yatf;zZ^0yVdb{WvkRFe{Qxky zz6P>p3(!CEH6hfvv8oR$Gt=hb6s)y+?{Qk8tQ1w?cN7rvHd>!K5!7R7LD14H0P1Lb zj#S(IWydx5Rhia$j|4oyil_Hcb1!CZTDB+F5zkeHcUi~nwO{XPyNYb>5YC!O_eG&d z^>9S-K*;G0U&u?@#d`jnD`QH#hG2!|hX1!36VVr?O--m%e139blS@cj+V^qNYUbh+clbbVHpg8^c66aor0=>Fd-?qLjs_RWT6*Jd zdn&x=POI)r`6y9**^xP|1(D4Fw)k(|2oCy4sdx0R>J?VBa3)~zWw+|4G~2jr+btxKXv zN$P*MHB%E{=1&(aUe*vCtN`IozTe>ctJan1)r2>1T!039FrL%J6elyzg%b(;D)fh& zzX};acI&Rnd~D>^n}p}*>=Ve{p9G2(|z9|5s3UPC@jyL<>o9- z_U6<=Pp^_etyWUyMjLgJuIah7o#fo?I9zRFb>dY8qaq~#LH&p3(;6SK0y>R81sU|? 
zX=Z5&id62`m64+2`OzMHQDU{}e=goFW&M8*Bb5!sZr@)Vht~nGx|l!XDJR9{2)N9^ z=NCO=gtMe5_FMu5R#)B8P-W`iTBSdUULdHstv^w0$ou80F6R~oS)1$QbHU4K9=iMf zzD-6n)KI~y^w56(b0YYNk80pTI~WqIk9s#zE3{o;Jk z4^;3p!NO}zUlXTaTGpQ{44n*sXDnN)jDO*>G@I6+`4|hsr9C2c;eTaKw(7Y1Zk%Xs zisi54CJnBU14~3IT>WZp4Ds^jxd!*kOp&t{GnlJO>&UcFx`xO*v3Q!5$Ie#-*IQxs zUYkeZ;du2kNF_OnF%P?QJX+`{PJs3@%8@K1h?RTdWn66SQ054`0 zljId|4LPUsF^ivGf!zFqBi-W`W@NL{mwcE{yZ?b9hTYZgs+!>Qf~mRj^Ms4H z#`#iIbe!LCn5e}*P9JM&KABx<_uL1q$)aQrMq&3#cMlJZ21JO&k2wEge>BA;b&-E3 zw@L88>%rX60y+9Z51`NN6f3;5h(%Qjd(t&RIV5Lo=x}-+cIV~4@FGogo3fFtrFYE>%AcY;v>jOWHewwEP4X`)lst_K2aF4mP9d@r{?bU(pqi9K< z=c)RvKgEOC*t1aix7T=3aui&p;%j*|KK^HZXM_5Z6QV|7rd0rskaUYNpd`eD-+nb* zdVPPhiV59d!Qq^Vda)j0pdqZk#08Y*>!)e_%0TF?m>UI2{iuJJ(a%b8fU^2&{VwH4Wqgu-rTIPhEEUk9db7J_(w+iQUK+mq{)+w9}#}=7PJz?ghhKq68#@82&?W zL)FDa_Q2M(ipAxAwZMTnk)(2KaNx>Q%M~9ccL1Va?Jhw_uMDgBd7&8(@`BBF&dDvi zUyqG$NKO;%Eed>vG=im6zRmiweY>>;4bV6l{7(--Or7*PH;9!o7)=P71Unv%G5AeH zUcBu>h5$sK|D;c3<)r9+BuM2VJ{GHK=hBDZR z>PL1gb3?Yn!#9*@fWM?_zvY5UvUhQF3%46PV0DH<2DbCjuYvpCIyyoml#$J7YBeEx z5Y(?y*G6_R6>-vt@hlB8f9BDrt;rxWk%GOhO>}F%4`*}^Rh@6Wa8ri zsQC)MEZ&Uj#F_}|oX=`@Pce>Hqy1EGhQWWNav93+7R3A!eByGCC&*j5m;K4FNj&A# z*kKeG)!aD7RrDF-)WWdNX{(~sawyCs>jSM(HE;3r=x&>PsHRqSqYS>8vWP#sn6C!o zSQ~%b_Dczk<(cE$?p)1zeqPhAW0=VVz|%0ux=W2-T8xZ4&0O&MEm-&RL7*c@g+#EF z)xqi%eijEiNNuLNbVE67i~lO@|HhQe3Hcw{XF0bZu9#WUb1Ga?z>mC@P&Nt&KQ$GZGwvif&oHt{n5(h z)^~mC?5Elf7-U`8hftRTUQN+ z(xZeWd+-rlIniI5T9PsBOoObiRHVredW9C~yDya6GY_P&r3Iselk2wCJRU(cA%^&Y zy=4CL;DL_iKglVV0h1s;09N~MvA_rxJSeZS{=kWCl5T!^jh!IG{^*_x>mhO7(aN)< zNvmb{3z!s}w(KCog5o0CtuMI#y~8VB5IYH*{cTRr0z2@X*Obp}!eXY%x`D};93Dik zmA?PsVaJGob-dN;68uI6eie>ao?g(yQ^BT?A+ep;X6PRWdU|aYkRy=cHMT6<+2r9~ zR6du1cS$zE)i&~ujx{T6a_6E@1M6#`vC{!{aNvfLn+&^D>S0c)e4&P{z6q~tgxt9B zIG*o?;R|8Hm$~u}^yAguv}Z$DJZeHWdT;o+6)f_GbmDNZF2|u+IhJ>?$@pJXt2x!( z2PaQUG__2!;G~3jfchVpX*P`>y`Ov}qs^gaV+-ZKoeW@sR=iv8XQg$fz7ldsKUIUX zG-szsJ9R9x{BK6CDqgwd$D2_8&GO&%Hxq2*q%4Oohi|m%Y9sM{D027~4#nyjmNV=1 z)OI1wydu$DO%$iJADW+yp+5<9Gx%wbUQ<9)nxA>V4>*&f1Ma62fFalJ>+}zb(gEU?VKRd!1KPUDDSHfs z3){nR%Vbi!uDHxivQx!hI|2~OmTALPl5c(>ez&jK-=8ICDLm)~&Xa1ejpzK$ z+>b(@5D!*4fu#M}tiOj@<(vDV6=wZsrF(UKmCTvYk~i^9P9~49CsK4Mm*p~F zC5Fo5?%S=d;qPz*7E)ymdpCoJm3kDW%&l+A(>+gmK;GXX`W~BNnIWc*tku^lURaOO z-d)1x9;+3r`j%55?~@sC#WfU|=BKcyrcesH_?Th;nI3m49)m!aw0#{9Pq=}=#)ikj zMXAd>Yf7cW!Av(6VxS|`U1H?dzpZlw{$n&H92W099NNlnZ`~_#YU*2=@4nt^Mbt72 z>4jJUWRCFN^*t3cJG3iVo}!i*N|sZ1aeyY=lL`$b;zu%}A5@{&CWFx38q?z>64!|$ zEj5-LwKz{YkriGG-JW+!tFP}-yrM0dD%TOv{4v(+U`o=N-nDeovgJ? 
z1C->a9%cfJ?f7kQ?K?6{oqY?tk_z*c)tBL-wC9-G;UjW4$fDrKiC=j~RpTcwo%G1~ z;Tt@%$prxiaweB9yv97!s?UG(Pdvp8%EIm4*PY`IEWld!ZhD?ui=aqC>2$o4M`i)Q zr?n6G6baE{6-6xB!T+{6UT)Hp6)>%?@}G)B&1PpiJKCG@lDn1RDdgO9+CQRr9G%qE zjW)%5+4nkgw>XTz@fI8s6ILYgeE)K9N~$8_OCV$9CsB`K~AB%?5yLjHaA+!>K+9MWsAJ7Wt*l@>XIg27&2a5mK)_ zJ#>ENiB=~%gK@=6LS@J~rnDXRk1AtRcVElnBV%Dy{%*d;9NRT~?N$A~d6n~9Z4FG{ z*Vy+`u3U)~FNvzQm-xWxb>X<}S|({=S^k*vMksG)!Romg-+lGDL94IFK_Z|K`qB0J zZ}l%?@}_^>(HOkiRKXI@{oXiT#c6Cdga0lc|ARQV(GJAJhCIh_AF!4-3C=z4_}#XD zA^0V3G-PyEQbX(ifXpw)b+Z}KFQ8^b*Y#PL^iJz$330fWVM^1|8E-Qw=kezVx1#rU z_p#s6si<%06{e%p;eDvbBv88Fyn#@@iw_-Rtbj@eDXvG!I39gvyHk?mZWw-yL>O($ z3|v_}t->3td=DM8RQX%P+!vP_w>67pWi^I@MIPT^h;fLY#Y(@eZfxCR*3kC^d|;~* zB(wF<@a`5j5Qy6iL8Pac&_ti;%34jzHlsZs@TDq(-3Dn&J37852Izfz*)N%z_f8$) zEI_6UispZ^{Eo-Ic4|5@WScJQX!=~kkwb53Ka#ufNqQG>7C8+u=3nQJ{P7eWZei3` zGpx@{Dp>AB&F#CqPPPFSbKZ*>(p% zNDq5vg}z(?_-*Q4b`Hwx!5i8ukFzzRdN-wQAi%b zlu#OzJTJ`Mz51%q7LxN%HI9TieyT#@__bKydwkIqjD`2$D*5Ml8~WLUAf!aJ+gvvi zQu$d(tH@p7dM&ohW1DX`Y=ju%lG| z=!>r>pUnQ+>zl*;>(9a)$t;#oE~V}9(&+7KdX4_(i)=)d%Hsr5Ybm86@gbH1oAr#v zKGs+3XQeEvby5`rc>)ZCK2Pv?Q2ofPZL2ySczDWm-fC#B-6_Mmxa(>l^d22I94#s?>GCBY9|c~#cUb&H zcN|*V%T)9tEp>^Z6F8=Qclm|pA8R0nptSISRMIxo%v*tNL9I9+XZ9-0ppbyKVaV=} zMfVhX#tW}z^DRyA27Ylp5Sg5NHHy}idSHA1Wj*%_yfeMOA7Nlj*+a)yB|W1r>;>kZ zNsoV8X1l((Lev-Ijk}Qi4fsDZL9!>%GWhf~*TeYU@9%RfY^HDNYvbVZkmx6r&MVFd zK6@`z+fs?o^-$$R$UjEOGZ=sI=LdE4u@xV7GQq#OvoUK6#poXLuV$WwBc?0;&9vVW zUmaqM_v}xozZHxPR$X$SJ6a_Y$U)5xH zwPn4B2Iu5FsI#ToE|qupB1H#dD4qNc#}^^Q#5AU@9w-+Z|p`Imd7W_SXONl*9r*R|g< zQYSRTm_qR&dbwb?d99Z+E z$<{h#O3KmkkQL9H~zQ8a@4Y;pA-&sipwsBnvDN( zT>MN70}}OQnLJi|Sp)EFjybw*c#_ngo`p`@9~xPNS8wvM|9fn?SrxbYXs{}oqHt&T-`SVG>0MXAp17KhH4YtnV7HFf$ar;gY;nVLaZASl z{_XE52q=)m^l%QVYaXP=VJsT`ud^VbX0=+zpxMW4XhkGJha%Mt!bVG<lH zQd#$@vikr6QIH?@{$YGg?F%3uV^Ryx#6DaXOLEZkR=7h|U9szNqLl%syKT8cubTx9 zB@sQg5X+Zu)XnuzF!U*2mo7JrvqfeSmuqT{Nf?MU^z{_Jzg8y7JMR?;5=jY^jL^!w zpEcOp{la$5mqwE{bJADjOn%a#81!iV+r$P~%{KTbd zM#7%fli|ps7vCIxy+Jp=%MQ|e-b$O&ghelPZ77e;(?U&=uEBo2?BBcVzWtL`rryfd z^>?aV*zn$=M3n9_;PyZoO<8&ptxbt>`Z_oVOOpG-?LVdaKJ208nQ9w!HcjithSf0>W@vIHtiQ`9}xg1Zt zp~2+Ae(vm>R*+2R(KSE$I32m7W7bQ69P|~JXJ#?ai~2N4pALR>*}zgOUulf1v)OCF zuy1ea#V=SaB9BgKJ)M%ssK@lEQ{?&D4L$#Kq;K<@<7$r+@HjY((ev$Bjf*?0W-hdq z{ej3S@kW!m6&-8}liKBd9aWMYaz1yEA99VNi<7s}(Gi0BBYqF#)6f9h?7R3>!Gi^s z3l>RzcQkp&$$stuCaW+SNw1O$g|YkdkkfyUvz*y;H{G zDwfO23Q@_2M%XKC0+s`>8}zmMOJHa&Ma2m==iP7!yMd|W1(>JOrM_OQUs@27x>RM~ zt&pV(u2noTrULcTHznUyxxy<_X7YENTK=ohU4Yh{_ba^o!z#tednfdsFVI4i&PfhM4PV#+R!&iJY*cNC)$hWyvZ`BbST(jj%u( zFMHEKw!MAw@?IA8@PPW|Vd2Y-e30;0ypso&V%ufnt2%+gBUTGEPGI;2zGb+%;+FDn6f|an$L`g8Yj%M}Kmo#)UgRM=} zn(PVE`Z5%8Emtmiu=)_;j7jve#XL1JOLFV27vv z(K+qF*4pS=;G>auSG`KK#_#r&rV8JVA`^k3*^tIvD{PN8;_rNZBYNilcr_@k47>PPWXZQTtE;<&t@MMHJ+2v z+d1GY`2;5&8eyI{Y_Hc_|0{>U{j6cizD-R&%Y^BCI&Nh2+3XA^ACv+i(rl1zCECij36|smtmS{qS}2Pz zActo17c8y@C2Ol>iP1gKWTPPEHsRCTe^qJ63YIXgLf~uoGF@Mk?#<~={o~c2L;;aB z4&;_P%K7??DVEZk4A-vRIq|)wwtmrDmX4|<(A=b|!3h@-sTB5VI8|K?)DtZYhO7VF zU{4o|+}~;lv0eN;CK4e9D*6c8gLJ;1(}C$+hrg(BkYCbOgvoYO$Gmi{+^qF;3)f#} znQz~Gq=I9Hm}jQtkhhCqs*aOyl(%NXmGDi5PU0c6ESS!_<3{+t#9g)7>`W=oDFKM# z99*g0JZ_NIp?@F9`yh`_Ns+p&58drdzFXJFXzdigAaDBE7C+nq@x-7Qf}0TykHSV= z(otWe@AR7E?Oz-FwHkV}HxkCLiNV4b4)$AFRgXpgG<+6FkALS#{T7WsC?xVtW%=v86lVb_a>|mGs?rE`*?+8J8XNiTrr6;xG zf(Olp>3%ei1~9$!C57!%WccZ!5uB9PG!O1nt}sA!417L2uy21Nry{70U4OT9W{|P- zK#evLBIW>kDQ0WxZ%QV9UzMm;{8AE8vCU(0Md_2=xD%YnC7Ipx7%Cy?V23)c zrit^envsJ~SxViDXIT3qc~iG^@S?hgaqCd{oKXB{ZZT@uc#%B%_uhVtq>*s!|Cx|{I?dzDI)?jv%f+V(;i;FQfeQLRBBSj{>F!Y7IzN5;IyGJVK#(sWu`w&KScHoboLpIIbZz~NtNK}+%0UHsQjxKdGxG+sZ2iZf(w zKZ0I`;QLY7Mn;LJAgc^>WX(5_>xSyBl10kfR% 
zU-vKzQI(PRgLq_`aA@F=qY7Vp8w|b>HAfiw=evBRqN-b-y}%_8$RU-4R?4Rrjvc_J zt7z4NNAh3X78Fw`U?A)ZuMK{@1X6I`L4uqty;^HAZf`wVOS1Z@gB<<+LB91cDFAP| ze8pF=VJ7MJj!enq*(Q!aJME;&y;g8%LDE%SIFJPT+p^jCE|cgqAvMnnvGBBb9Jk6E zqdUh@2_LXDRRBxpmwQBJoPWVv^J5vQRr95a;9mVk>-+(xW?71)y9FE_vsr9*XT@aG0Gjj zn`p+j7r-ebo!nk*yPWq(4v#rVl-}_(cZ`FCSO-qtxK^Ls7lW~zuR&goifD4~L8~wW zwrJs>Y7p^8*lhE~U+hA5b^Z%qF+8UoBO-;F)K5%dwI*WGUBZ3e2Ffb{hrSlz}g`J1LKtZm_p&3KMdj@GBnHWHP~HR4Qfd%Mk94R>Sug!Sg3$S0xd0FE^88S zlinMZA}wYqLF3slnJLAp3kIpB2=2H(L&}p8pv;tZ@+>PVAOA7%113L*u&0_X#IOPG zvRKXK06Z~C-HUmPcM4s9salsBnf^}HAzxGWU!m$_urcWx*k1MpwSf?*(U?4AjFAa> zi*D-yi7M1)_+d4P&U{Kl9aRch}L8-2YJ{mRKJmsqW#uHSK6ph0;lV`Aa z1PI}z+zJV(6Q;`lyobKhXPj9dAP4;8dBB8%Lw~G{lhDuRPRCYSQEcjT675^o^S&T$ zBw0OMCfv;R+O1A1eC8Kqjg83)dMHx>y1}&;{~q7I`{b5E z;~Rz`gXpub$EB1;_cI;2Xe96qDF3<5|}-G&jYE9 z>8&vw_B);wOe>kV#>FP7HUztK5U`U@Xf5?k{j*bF-oq3r3`kd zQ7Ea7^E2&3zY5NhUwjghEk?P>Tz7|Eh+4PnjHbbn&hHEHlpdTmH+)}9iu|>_0HSNk zf&XxjAAP^5Xw=#`Vli2Og#qrKxOQ99&!AAqg#D>ZE4W#KfEGL^0!iT;5F@u9%u?P?v&4P_sZm_YIGzhnYcd(kIZa#n#t4xyJ!*e>{ELOV0kS zbhlP#SoM04GiGbDVus`Ri=~Oj;{bi6XA>SJ&3*Bp`{iL69u_o*l@#fHeD2TfcggBs z!Sf<>tHBT0vPN-F#3s};)?iXE6mz58Lv-$B%~k>S)y|m-ZR0NA{5H?@MhUC?Et6a9LMm9ySSXll zUM+9-L`f=NiZb z7~;UnV>C(wFrRAhK=!yhsYH8tNOMg|oF%iJ7L_*eMXLeIS|xy%+r%@vDjQolmGl;8 z*%Vhoyoe~l#Sz)+aj{sS$M@0W@lpYFWqg)c8}nEipN?1js?r39JAFT_Ah*4g_RC^U zc-cJJ-Yo-iosY5ap_=6IHm#fQCLje!uSx3lw+lhPnfA18tCI2~RsU06GLcjN&d;!c z>63T|Q~{u(0HL>SDd?Qq#@f6mB*#sFkOEF|-X z*l-w&E18D_a~uqhUKT(Pqu+Y$_`Icb9;WgH&(B1;=`O)#=KjE6w|r1duB$)rkqci- z2v(XV^?jkfjySv|oWWwgoR^SCe<9IGk`hXq^!CtRNMagz#tCnUGj^eoV7l%ESKW;= zOeO@sd)`6FPa8elsU96*UC^<#YVR_CJp4n~psQzoIl)VdKAW7k7yBD-mSiKJO-mf5 zNC7WhKzCbIbWTxZd}wtg!fD2cXw298Z05Umo+~^b!1c~=$4G#N*(h3g$Ep|d4ZcD6 zmK@YUWyc1O>u+gpj8?zyJB2@D7HmCMh>zzGGUs;RdpWS8jY5H}L4MwBv`!F|?A;Xn z$cuew`Xb#WMhsmllUhVuY6S`N*b$J zD+y9(vuiX3GSDGt?AM=8@$b!bW6dmTRe#c=)XGX@P7=nKIxT0DJsX%-I|d9+W)ZgD z)eTJ4(bhBbn*pqalLqf9Ez7$Gg1C$fEHCy3M5I~lwu}6T{sV0rA=<4B$!|50xG4KC z&_`EzBghCv=tQhq5LjP<)7>ib55Mi_c%Xv3)nVaREs zWgIUAR6cAvGxnN#a1DjRWPb;PGZy2Q52jOzQieGhRT5tMqsnOc-kzBy!kUx5T+Y`g^#VX>Msd1#0jC; zYd5xafDOBi1NZsc&ruLdLr=@jW{@K?_m$ZXBWxA%|L}aMJ;5aZmqA)7+7#9Bpn7gr6_|E|z0MT~sG?^0Yt~PVn(^e4n zdVYl_v)or;*}Fb)8dG~r*X@Ns5j^P69^-DUsHCPnE0Amnww}N)#EP!rkW{y;CK5+$ z;1X`i9zn1LBFf^l=&iTnwtiB5*VL&Ua zY@qs73G0;kuIO{Ml_nZN`};2g`f(mS^>5PLHOIfW%)B9mOXrZa{EJ>jC$To&5&MtR z5QGw5c??Ao7h5j}g#S3xG7}cFuEXi)76R?{=mgpJ|8DC%aSfWc!5|vPz zwy3=lx@}DTgDGDgD}b|1xN31E-+-!KL<_oZ&M zwz*|Ka0Nm_%^qxF4S#c3sL!o`?l_~Yg#}(LwYfc5(nl`H?AD;AQ8ooO@T6|#(=B<{ zCcSpO-&!pfQg=5O9N#b5he5oQS-(b;D{R9I7Mklf z%~RA$@qX6#3|gD9r`meRBa0?pr+QkTB@OcwW6EdVcR3LUq<{4egAj~59Wf2O&PNXK z054S4$u#aH3M$3A`kBN~Y2Sawkj|kCvQb*c@`lDQz{f^lPzgsl4Je)jWwCLiC^XrX!Km&&?K@c9dPK2 zwWkNuxY;Y~lz35*)Ng*M)AO_}&1a`z8e^Kw`|@n>K!L9j3t5i47eKr-l5ZjtLt)3} zL-_G}mJsO37=K|!EI_3UwiCq@pf?^85eWYnvh=PZ`(X7p$lMe(VCxCsl{U=~ZrB;f zGJRAiu%df*1!r)?rn%&Imlw)jX=sI{D2S{TZJ+>4tO<5mpETCSV?GE$Il4d@Ph`=~ zxjUQ8!B=oN3;UMsWctmu_-l#KTXnsv0SmV}C7=Y$1Bx{5xQ^=#+FwC0~1y;yb+!2vvjWfekd`|tD}OS6Z_-y%Fy+Y1f2v32+vh8 z$X@cwC_GHw-^M~QN3?8*k3uZdfZT(N%_qpjO#A=^bkd@HHSC^R+gk|3`n#)~{R zQNgUWdolEbZ$u7o)LVCokm&VLHvJC`eM>L}7ZF&vg|zfB?ygFMNKJN;7VOzeTz(i~leggkk3)m4Y>|MpJ)s0>WDXw} zUdgCNx*xS%J@4UV7P>acq0;J)kLP+Hjo{j0F~ycqQDS$~uXfiyvlAS^0MU}8&05=R zHm4-x8|}la2raTt_!af-pZ%pauIZXHNl83HR|Ecu^4DWRF=hQ>Y&O#f$^8d8fiS-1rtf?1B7@Dhy}2t`I$ z6z-J++XQ?WF)UDAJmHFlOR4s_J1uRTjqL4f)IQ0AEnyROo8-}dJ_$Ko;@@>>OvR&B zoQ7XT*=135Gd4b2u>yKzIeDG5i>!3E@n0U1m~lQT0M8<1ETUq4l_pbN+Px%u*_2G@ zC1=h}-4!5--vaVcK3c^Y&y7~*#|$!mKh5U9wtV?I{F(f5u7J%@)nJyp@Ugr^7#zDc zvBXq_Z)P*^fF2YMvL5 
zx#nAHTwZ9u#8AQQ1AWC{^K?tR+lcUH{WmL9Z|=Pe)KvwxC-=Vd}a2R?pF zX%reh1-wdID;It{CQhF7StfM4g?jPX{uq6oPdkFcc0>_+JF+%?E0RBU`fxmTD=WK5 zcsQl0(An<|x2qsCL@$jKC~L2GrMR1G1ok=RaCU{?@^N$EAcESWtka`%iNXIix->Vh zR($$w>ss!Mt~TYiic#2sQoiZTx4w?adOyJ3kNLCNZ@R$Co0%#*IrE55uVOMbc3VTr z(%CxbW1+_6%f*beKvy=p<)=_8 zOT3(ITn+X|T5wn;Af98vd8(}hutzgV#jo(a;$GE6Y&x|Pt;dwSz{N!xB`>+$w}_a$ z`Su{=$f>Q8ov~rr*0@Ic z`f}Vefiw<$cPt&+m8{y`UZ{37Npj~!g^sgk160snU5i`b^I{HY<@SH4su!j8V;J5t zVmh4S&FE@PzDV{Zx|9?}k%55cN0FgrBhXXCB)3>hqEn|#NNXQ)wAo7?Ty+ksjykc_ zX6pwUA%mjR>di#I%Dc~Oio45iBRqIhw+i`lN3jm`FSb77 zq*CFFU@8)y@b?alIDWtHb0Qz}BXbU_)0x#z7noa5B6GE4jNY$s-ipN6{}8)@5dtbT z)J`wXWaSY24f3o{=^?2!=Qi*L@$~ z-I24we`C~Rtg_cj4EfkVfa5`ynrC%|YgsBUmghyqT_F;v>IS>_W&2>#P%4WOkRzBO zD%f2MOhVZF3uf4$tBw@j*7+EN&H|+5Pka)II%hE|RI;dKcs{AdJ1Kr82Go7nP67d$ z&%?h9w5napI91&x+mtlsP&>ZDl%Hn$vxzp1Fu#tz$w0DI?y56T;cf_D}2Ei9!&8HZdZg zR9L+GU6OQi(hO`pUYt_+a{Q)eN^Tom3p5b`u~_Rvn|Pv%YB~$ayFFjmopO$Ex3A-V zqq?+*Q=c(DcfaGIAr$H4EPUj5F=G6Pr8!Tfvp(F$h3{+omZ!4<9Y%CC4Gvsrt#_FS zjJZ-inkO5Q>+(1+e0Bge)Iy91HyZ#wo9~kU7b*s439mq1{{*eBR%{1oDQu>^3!f2< zL@?^Mp7fKJZ_-C`<~<9G|B7?Q#ioFAE<9>;4kLeTY>D8CNkVCqvO3$?fY+S1-QDpGRLy^GX2lv?Kvh25*#Ta@jr3mLLqsoMBS80Xx~#*H839>x6WzwLYuzTA zsq$vz`DL8E3%s`~$&vipZcV8La*HkvF$*p$apOewhiuH~V;Y7*ZCrn_zn4PBcSb3N zMIlh5lB1kzIX4cA`M~`iWX{|AIo(}#T>Jn(C3+pWUlSE?l7tp}!g;ltKGkDFg}@46 z6giF`mk!=E2Z&C|0mC6-p2!9oSBD2shNp+?Diao?*)YcSqab_vYQ&Exh3$!byQuAL z&=F#8vd5(Njp`RZAqwUZTm6p)ulmI>2KU?RvqN-m{xy?$NULC8w$#QExpNy?+72F< z#0}1}l|iVsE@|>VWB)mIK5CB7YVp^Q+0f5IhAGGOS|JJ+0zXc6n%C+{8Sx)lB4lSf zkFB!Zr>Ebqx2};iwBZP0b5gpWmuq+Nuj9B8XS%LoJXgc$X%q0&n1zT81fARXR##Rx zdX8CchX=QEt_{B=SDyHW64W==_EFojSH44Er|M1l{O!w+9##*H%u)+hGIfWqUF|&B zZFsv&lJ=v5Q@~E<|KsT^qpIq@uceU|q+42~yA`CnyQRBBxJN=lN=iDUySpykA>A$A z-S9s?DoNMn{6TKTyz;4dN848@W(W|mejB_~Esg;%fwU+W# z*S6$>*9=I3+PS}8^#6%4jxjKspZItu7L0RUt6kp{7|yC9L$uJOeUZV-ht3jVeM%!2 z7Be6AssM0jm{2C;Lv~yw&TRFo;($aZ@zCCOIwxCO=>f!>baayWyT5Azs^FQ5Xd3md z9QZv~#%zT#R8F~fP6EmEl|C@`UrjTO$<@L3P=dx>G4J#?2wxczr;AcyW?kAxsIaBu z>m>hwvG!1tI#r<4zb5|hRcJI-%r^aAkwgp`g=4*)J$C*2gkaHV4|)N7EXunthEN;2MZ75w8TJom+D=y`p|(C*5q&`8PkHq#!J68@t~NgY z)%-jw#JpGubBs_2@|;QeyT-6LzvNpCa93x`Tjg;XVqM`+SX48|4;3y8jYYp1t9b86 zP?#kLO$*k9RJtTonD&ScjmlTK7ONea0M&??xHq$bg=|)&ls{I39eP9dX$E+1prUc_%_xmR0H9a0fQR1S0!@HzIi}cFLM+L@%4M9vfBf0KE10RtiB`D zv%Z(GNupRmFP+5I7P5T$54<$Y(h9e$jk z&<9TXG(-ukarYW|(bbJ~)*6vj0X42>kteIUU(MS;)rI3n`|Cb-eYBJ8Ibd{JBr5Q_ zm6U$E484>Q0enLrt zU4->pbHS0j9^~;XUD#ZiPfES^MCwzhZvake309XDO^zNgcpyvxlNf5WO`U=f zS;E38UHS76#|w$f@6!!lqstcWT);8?_E}Q7Lhb5i4LUA?(q-fN^7I;7buSYAOb3Ep zH@n~AQc$Mfjvd$W7~{sNIuH9(2)ql-G~bH%i(W1xvcG?31hN|oqaA-)_Xu12f8UhO zdrM@cUBDvrN8wW*#RT)7JGBL=$)I}^&;P4&L$aE>|$gFP0X99_#KY#y_d%H_T=X7 z6EyfI+2?#~trmH%TN!DUu0Sbn@f>Td9}w+3Gi1t|pxn%W^5sNN4<77OL7sWi9Sf=O zzaq=s7vAL<=v_;>0rI*jM02;ZbcuSFY{En9T@s$T3ucnVeGBq-U=J8_e-*qcD!P3} z(tuETuhxxUm-NH@VLZ#n&P3aQhscq?4+l-WRzR34X2spn0b5=IN*%X!*|Q2Fe))mQ zo+sr%6?)s(K=A}B-UEsha-mguNj()3ybpr!1M-*$UO(szdld!D5InBtet$f^vWMJ6 zc8L7am)FP}Y4mu;1TP9Ro*i?6_by#}@g3Tm;}ie#OO9e@837iJF-K=#9<`Zw-7fA_ z-2d|%XHyVWFseuCNlS(n+03hds+Hitp0X`hdg+taR=P!2+lKG&C+}(?Bbp&ymN1sy zlbomsud=c&E3+7E*no_Ma@U?cKp(?4D*pZ$TO0-?rt^jG(z}2Fhe9;9JOsWvqmy^J z;Mpf37mKr|e=X=SD@1&E9Xo~bfYoj1mqR&bz1g11NhUI1It8n`B8R7SWy&(2Hr+-$ zg46jkMB*YL`xgz`Jf%;wm@{tU6HI(QB?D%L{h3wsn`5|6A5V|qMBkbv`9aXlipO_6 zSbX~NS&~KmKHLF0)``$ehXd#S56U`bTGnq4Y!b_L-N)eraZxU12{e*}d>5`{kqn#J zuq(oPP0ev%!PjCdm~tUU-)8-?9;!q#5NzcrfO=O&@tYn@TWGLRPIV^GE&g5fx8UX@ z;q5irULhRz(wP_13d*e2Rhgu{o9QB*qfManiln1ute}!4LjQFB zA%$U(J)kVpV;_NI^zQ7Z;fa)qxw&$y4`n2dVx)$&Et3pgWCeRjNfRPRK7B*f;*6B% 
zFnnjXyOk19Iv)PE?BE?5ad9S^F@aUBL0ga?^~}N_*{(q2=!EFYTGAg7`RAn%kQeBX z_T|xYKsnNE>o7rc7>j%zpGixP^g}am*I)SW_MDbYR3D%>ub(_T5l|&Nstwtj) z&ohDvm$|bwvrmnIV0`rTQZ`yB{I?4u(d?B_dkd9^UEGEvc_~e|U&vh_o0!ck3V#{a z(_jN{d1uO6euSBiwTJa#1%Oy81S!Oy8ttfwV;l+{Y(Gq{)sMCGI^n;#_6L|DWtFo8^;%MwnwxD_i8ttzdZx>E zjN`+u@nc+#Ez3VOEjlg|`P;LUS0iwEpwU$0y+aEaj(M%dV%YC@fp?H(nXE{n@;(;W zU63q)rN)!(ibGpmMnvy`8`)^qJr}Y*37Vy z%Aun~`QlSa!u;|`s#tYsBl8ZqwbKBHJ~CCnIZ0GC21ccOHHsGPPWSDjXRK+Sc4|#i z0Qfqe&IveC$NVhh9F`xic-?qf{yqxmi$Zl7xM#-mVeYOb=k@U|8i2mnf3EAcr((D*y-lZ;*FpWQXUroPi* z?7A@OuzmP_JMxaQ_uv(QJEDNbhn(BFp#s1>lqhEzTRrO+!H99-1--x~?eHNi?MQRV z5$1=?zjNZBXPGdz1^s0X0JX=GSpKsjl#;J-C+cF%h2&gVrKSpIPN5u10-ET-GJ zvYt>A)LCFY>w_+V#u z(1GA9?41?f+DnL~c9sOV(8l*2Q$QBZq#Z9oI>;(2c1Q!ee~5A;#~ z4=GtsP^k}bl3lmu6*m@8;X@!l{qh4lqPk_O&cT+u8>{^>sal!Tj3O_fKvJ(ionP>+ ziAH&G$B1|N^XxN9eQT(a{Tp-hnGg|&2=g52Y1UQqPUIaEe8tSi65UsGl|Gb-wCpPp zv-6^^M|STI4)_Vy1qa6-`OMuZX{uky$KAr1ZyUMEK&PLzu&H2lmcZ%W7*d9p5F zRH-(gnMX{?!U+1-H6-5#+<#;=bwPi36ax-Z7pFY8y6t)pOL4Tw4k^*bl9-xRw=Ng6 zWOzOOfQE~iFS_oTL^ua#)jYJ@Xhs;1MY9RqNJj!3x(9}oNatv@rJuZa|H{?zNJA>x z!XyBvPLviA%W*L(Z>=rjbzC<}W6!=3Q2y z*!nJv1{SEf`K$0{u~IvX7v;&pc!kHmuW9REj#ZYfd%IsP;%&%tH3ShJP6KM(tFfEePQsb34~d0_-AJXS0}S z31gXfFDOzTc)Bg4y3QW7zAT>Ro{`}TpNWtFd-5clo4~Gd>v5WLbt^1SWinFjJJ;j` z&m)U`3OSrHm-)IwlW-;4V7(HJZ;)>qf)DJ1n1|S0>*jqwzQ>U?BNu0eH%XQwYlIVx z94izB=?i&q+v#I`uV(HV)`a;uGFTjk`Ii3aT(6|lsl(TrOy~25CiME&=;XdKQRFwf z3DDLpB6A=kfLoQYd3L4cabInvtYB-c(9Ej7wPM}ZV0os8aZwQO#Q}dJmOkH|Ai?6i z7Zau7xcsQ)=*wxYSpG$$zc-fCuQsAhmYpD{Ty_WH>v0UCW)h7yQcx{&m$K6G&HoD) zV<(>lg#~859Fy}8R2f_U?uapsgFLx3cM&2jkO1}N;<7w()RzZm5l%$>>wlk^7J&xV6Kaw;erH8AgX) zz6z8Lqoxsb*1QS&o<<<0>waCz3!VsgXSV;C_1b!=i}mig6cb5g)#Hwj^Jp%7QD-cG z?mGh-@#EMwf#%DV*t3H!69Cx~GI&u$g8Is{^BFo0?koHnMR9o;mLv0|8VzvhO^hoK z`U9Kl7P8MhSYgx)AFGO~1XrKd{cSN;!wvYEKp9daokd#os&2?H^(Qv80o4VwI!WhC zg>mqZ>4}UvDQ}R}*0nM8IBmVjp4A&}N-FMexCCx0w;&(_0wLn#MEukpw>m6vpY2&q zRs`obI#G`TEx=O@_pC0c#&$QqRZ9J0KwKvD@Hq4GG9{!;pJX?h)h>rvagv1!XvQjU zjh>ivkeOt8l@DII1Ve5`;&kY>O6PYK6giZZK`O;5{V}_?>J)=JpObjT*J};Gu$c)8b7f5Bq^-6)G)H~k)`{fydJ7PnfX-h1g@g@>o@ zPsCgQO4A`WT_ID8;Kgbms78W%M1qvK%2w8%P&qoa<(_&QAJ0QY9H$8l zcW+v_!ggF1sQ~{X;KC4z8c7ilSwP#o*||VE89C2S$Gkj<1Exs_FXw|=B^x|{rgR-{9d9><$b(e>7~n@0N=$^`!=cAx6~RdIsp zmoqZCr+4DHn~1`VEJ(Q7t9QG)12*ME?|*L-~Y znLaETSESp{fg(;J|=^5wm33)!B@wCmk-wiU5C`;O6KWr7RbnyF z7&K3`cL2MyIRWOU3w82>&~>rM=i!y7^qbD0o5D!fi`~WOn$1n?RSR8tAxTAb7cz&G zq=7p?2~}JXMPX^I7wa^l8@W344)KaKXK&UDDJb!VFa@NA1n)IcM;j!AXtbP65?jO2 z;A8j{Ju1)tvPQMw>?o4+&Y9*_UDy4_&x5uV0?Bkd?x$N9iX`X0s(XBj+1Hger~JB( z0?F-eA(Hj93TnM8U}y^_h8(Q_(EOe@688&8cIjA0r-!+kXx-&@?03DV?*ow=n4hdQ z4RZb2XFIG)@^({-y2`FAi~9#^`WfcGcGC{#+lyW2)E4>$LwxwXvVQ!sk7AN113(g} z0?o5}#&mqgj@ln2m2aQEIvEr?vjt`jMRz>*ewUo@*5lp$m|`L9$m?Cy6S%=@N2-r( zWc}M{Kh2IHbbU#M|H~GJeM`UOZ;OvhdgMJ`moo5JgT!#(7FTi}?R2aQ4WaP$wF(GB z3<_7G1X6pH`U%NMEUHN8IQ6B`!2T9$XgwFJlkb8#zOZ1jXBxuar9OL^t@ncYI~qBPHH8tA z%!Z;GK^=#_aa~WL5A!WJP9cr{LoY*ltdnaXwJi8V9?WB-4-cy-^8qFnK#93`yNBTh zmJcUzZ+OkgQzl42<{&iA(Xp_jM`Z8tWBvsPr@$Vkz z)?ZbxvAa;Kvz**b8ypv))~8wkDu#T|V@PU6&YGc_q1`k7)vM*atpg|D>@1@oteTVz z!Z=E4hUhTm8E1~#yM@vQdDRnwXG8aMRa1UqoBTlL%{Xd$ zGKIsAkQUj^9oIMNOZS)Z+iKh1`QUw@`#ksNXcO_%#>A|>FnTlK30t|#owGxIp!b8W z$j*czbhenE)0p7xn2NAuZE+dG7~$C~?QBB%`^nhtdwNwmg$(zdPixI_;__*SldiOhaaAVevYJ zC#Hl4lPsUTSt{UgWT!NX;dVq;2a``$VC#}ziY1r9{<_}Sh2@Fgot*23<+Om4!4j>L zP}Ge5)dD6%eZzs)H12IX@#xR}7K4R`w1?;E?t~o!T(s)rsJ2VS>|ltx-F$asyr$_^ zJZ0K(re2o!9qgfrx)%Aa<~tihK^woBa>{?4Ra~1e1@Ma4UN|14+^~wHVErd1fge?2 zcpdMn_E%0QmU*Y42d&8m4((RJEO8m#P~NQJMh_-2uNi+$H&gGH?uT1ZM9SiT>5~ow z;INlnjg9VGod1P`!9WEr@_HGGog2(*ojy}>-|LkV~({I%BI 
zKcogT)*zVTCAoDz1XVZMIFpDNiUE4*QBaARsOu;}M9S96Ijp+FI61$WV7Dm}O|U2+ zI;?#H(CCJ{AZqShb?WMBKYP;|3f}ynI3+5Ba7j=}(J73Gg{dJ9y97i&7*GRcgzwX` zu56Ie7;mvf-@+7s8)(N{<76;1CEsamUko*@mu83}A9WKMc^l5KV=*-Ae6=*sIW=vv zbz?{HzC@{0ps&Zy0a+sf*38%#IpFN8{A&$bc{i?>+8U3FjT(vID<3Fu(6s~tNI~7| z2#eYLm6(Y5aS4cl!opqI!5PrIP4Cjin|>jz+nogUN?=j=`A@G4UUl|7??TI&9J$Wp zr7JB!aHfn4!}a%{kQP{mB}EayIR{?`4G$024QX$T%w8I=&@~N;)24-2es4zA1I)yX z)uVQxGoOcAFIx<7UUOy#%v6l`fIW=-)cW|0(2_BiQoSvulbWeqU~l%CiQIYokWedV z4a~o=Y8C6m0eK*OwH_6VVX<=_5$3QJN(aa<&37M;$&;Vnm1zrRY~nL*^(@e{N^-W${o^cx_Xy+VG%r%&4iw4wyaT}HzUJVrWrTajXaLy4uhsNV}+NqjcJ zAW}heGhkn5gGXyH%OrHF3GyAj{4jOBvAkdHx)k*@U2-|uFlP_H;7qKRu^jDqa%M*6 zj*#aHsFE!(Xj4MBk~8kDXW58SWHFT;DSkA%ev`O8N~{k)Cn<=$r=#hEm$k)Y<(voo z2ov)jfp&6R5SpL*IT1h0`Tbv;7ZPXN~ zYg?mMe10d#Xcu2Cu#RLS#PnhC+op9^b4|JpMGh-#S0kt!SOxf}^;&a@noZYgdC~AP zx$PrTX10dzChJa@YasTlHW7SbFSf*2M7BPc1CO%`XV>X+7t7gs<8^NyXtiF)le-5b zkn{E%2zdqU^yBO(pN$sl`HXexeZ5EPjbb3URS4+DoVl+mPt>1${1@o?m`DA2aqWrI z>Dj8qsudFY-Lne)NOW1O@F7WvAjDVZDJ`c_7$wH_DYol0)H`rArb})-Y za4euso4W)BO>a&FcUKkpm_zX04S$oAI*8l81D1Ww@vj=4E8taYc zDBqu?Ps&2Sc+gSA+;25%*_;?{EG@b}XEMkM?N)O}r44UIt@iBn5f4Ai7=JOJqq=_d zFgH9?_Af7LqCM8S;E%X_IJ>%oqTtzz^|}>mpMj%H%O&?ps@Jf_KN)Gd&g6_O!9>T} z-mC~BKD&%Jyi{eSV7Y(V;jU&Hy zX+42ih2ymtm-o55JO^Piy z%GhA*@)qpEP=<`Mgz`+o6Vak&k0CO>YR;4!*{u-+8MxX4!%8e;iw(D0V{P{zK~_&h zAK(xTRR$qQ8?Py?zhjx=F!ok{%qhRfiJK0Ef=7e0yb1>3eERJ!JK zAcxY=*x1H?+laVyXCaMoPp~fBpIyNVf47eby>$iwUB@uGES zywT>cf3$1ERu=`9tS?zcj{8Q3j`iiumT^w_*w4bnx``Lrjh=V`F6<4qY0AGIDANDT zCMEzlYrLh(=dykKzYg_>Ev*x0SbB_}MRbGOt4ql45u*DXG7oDqXw7GeyrSUt5){FK z>nHA_yf`6Lf?hA|P|z~B#Bu>&laU-AaPmpHLN`UACw)3yHKloebOsCEL%IUIYl#k)p_**#vkM6 z6tWS1-ADV4)ok)OvwIT!XKUA)ru{BXM2u$(DN|<#b=a_!Xte3jz*5s zI34g&!k4I&YEQ~7sO22u&{sdZ-=#$NznNy~V&y#~rlW#Fuz|ZijuL}=k33u{=G*O0 zhw273Lo2~*Bz;>0z+F>1d3`T?;XvlrWv0zCy-Qf^?XU2Bx$Nq3KQ z-CS#INGlBy?n^*rl^afdCljUn`R2mI8vP*zjpw!rL5nHy7NY{47xyEw6Sd>-IA^j7 z5yuRyPB=8R*6!x6mNME;hQ$%ZJEM7prLmPetD&wN9cy+#B*n)r!(&OHmw&nqx_b0Z zohM_5w|jx;_14=Bcem4-334nOJ0*Sgue@-fx(W)+c4NzB8c|7uqOfB<{lgv1>EBpC zlyshbgP)Xf=RuUp*#YG)V`#cfva-0%T%QzhZ>EFCJ|lhSRgV-#lHZ8eYzeD=Gn~85 zasZZf?#16 z0kS?3?iJ*ST_|liaaZj;_q@+UtSo02L+u@1jgfi4z0MZ67J;JB9`#zx32BCJa*4j) zl?uJIfg=uAk>cEwmxpO<-GnZWxPh{C%2)mIo|PDnzO_Y6DX&=NW6wjIyAHec$9m1e zcIT7{-`>CuPWIZVbh5zPe^ssEFZZ&1GZFem+l@&mKc^5tjduK|P}b=&9SAcrI!<(b zHjx2II!K@p;sO4!l8dN}lC^&jEjk0|_`<2l)QL!k|5XTQZ6&13{r$mWbkbeT0NOSGIh1cLEZAtC z3V-U(RFvLV4Zx}R@HG3Ig^)@(jxZRKM)JVMduj?rTs<|S|pzSc&t1Du9NSkgAA#KMD&8Qxy981 z+xI$<>j{x5n^29P4t%~V&_XJO(S|+i%!ag~O9#DPza;HB!e$6T?L;{?XgK1M4{OR^ zEP!0f(2f~)Y|~gE-6O2k6-qD_JKE|XwE#2a(PBu3UkSJt86DGjCtUUl3SVVD&*w_z z6JvR%jdTQ;tSdtEc6U#ZNTc+}WlT!Z3rZ__Nx5*X#owMBZy{CFRXfeUTPllOyV0@% zcdvxO;PQ{X&HkGcZXk~WLUR9ki%g9;1sD0Xi0pAo#Ncwn<<}X4xyCUd(vsi@*aEE( zmzS?>@B@qyuCcQV4TG#=;$vks6qvWZ$`g zWZS;1#ftNxLf^lV+4oB#@*snT+2y8RHF%@3NroP_%OUHidocCLsk9gw%Ab3GwHJ9k zalz^m=hC8|6E3oRO>0fwNRkiKrtd#MWsT;K8(kA3p1NysAM`(h^-F);sJIwGkw#e= zB4a)E$Pw@WhpL9JR1ynXx#0U+J$V#dn4-HWF23Akvt0UvTyuK-V`9%DQ9r}D8#a-= z;q5&Q8fs3?b7M1(_zlw@5#pTbwMidb0^-f|eeNNrw*uq*b35$Ecpy1kcARCeJA_9K zTYhe{pNy+OlzuU4>cj8;DzUgcZ6tF7*^ELJ4i4_ACjFb(5`=0PlT)#m+Abs+xp|x-Q#+ zq${Ty()U}jX>d)(-{^fb0SbT38HnuWy{8LAY0 zHB(X8UTn`R7-o|-f8b1J& z=6mEXd_*j|*>~OGjc93sJn7xT%<%9a)(A8%^e%t<hVDEKN?z{^LKy8E{VbK?mpv?X?n#iDepJ?m#_SoTnqE+Jj_wcl8 zUi!5|n{VKueb(zcy0QRPBGa|H(Lob4Ax-Sh^sWZfU$9tF_WAZBEETe}*irX+k+8*% zB%Q1(8mrWZPen(Oo~q~+lZX!(X71ZYq#azzLZMSHH;@GPWC#p$?KeQ~>hAo7z0)f$ z&9i6pYA6GBPglj0+1wq*bgUU}uNw^Uf%kIg$Pyz`TCII%mFod4CH~yqLYcU_Iu&6I zLCsBdXSoM_u=a*V5BP`2pNX6-pIM9GKEE?HMI=9tOa^D6&|a!bja*}>7kS4YEAR*1 
zH&$#$IzLwV4f$H28%LDDrZm5;XESS_E@0f!pamcUX9gwDePzXzLZ@SrQsekhFk&?W^ts#^?#4izV-r>##E^*yMYs)fQ~U z&aUPkA!{8gs)@(n3-4&8(rRvm1Db+n%724uiVzwHM;OSMf<{2Nbv9A$sgm2@gA-C_ zF`xkTPKb^G*z`c|*{|EEnMShuTHh*dHbT*JGB=hbefSRd3*r3txM3YBjXO2&Rj=!U zTGAU^r)xkntgE*3rvlm@pRFMzY5y&kT+UAAR+-E6QK*;YtF4_!lxC13?BC9GnQ|DI zvE&TMXL%Bp`7DtLTBh^%@TF=;66f;b3{pY#s9@6sd2LLQD>(*{WN7^er6@H12VXBQ zGr|WsqoNd{aeSpos)4L>w0bSDAo{l}qtOG3@c!y1jE;YrX8sHa92%4GD)P-a{L7e4 zGQ&&YxtTO){JO8Bl0JIqx=ggrNtO8L1Y`ZvpZ)JeR^igcj`k1yKZ<#ifeQvit`xk? zSWHAQp_NOUmMrNaxfa&kN86X>+>Qg&>rNx_q}W(s=(?}0 zW0axO&W4(7Ir6j0%W#@!8q<{EBUR20qP|AkH{H9fZ#mb|j6YxV)uZ%Xu}Mf}kE6vK z+{g0-kq)uQ3(%=U2gjPkN#%C&>E&f6%lpb?bU#S%8E?_ltv-^%Fmzl@iB7r{`rhMD zu~$Gk`mN_a-Xr|cBR2!#llLBg|M4=3MqrwyIg^Be1_TALT{(v(hDsOxTOB4k*C@(d z%_yMo{+oo?f$g8?5Jc3*(pHWwY($iT=CDYky2HdCV-|fVSAOm~S<+6|BZyX?1uE7fy+HqeYr4f) ze1_rYrlaxV2Fq)-txnry< z%pc-Wi%4{>d?}jk%?C`d03@lWyWG8s=?XRq!qI)@ zbW{##l{5G471RC0(hsLWGPLV_Y(G^-(5gAwSsN{{!K)8p&5#piN}6S-U6B2D-YZst zDXOTBOitj<-;TQJ*}}D97b}@B^JoG%dyQaKqF(Hc$;S8PSxD_eGqtBkpz%Mg=AF$6 zGMX~bI*NC?VV!HGHSkXV)vPj?LweZkN{Biq4B^Fy76L`U5$sVgxZonviSQ8n|4BK` zl6J*|qPQb1^aW4nSE#RML4}L!4+}sHmT(1FQ$k+Z?~lirl@Oj(-bcuQx}RED5!roh z?S}6*xrbV&p)YP+UsH3Odjkf+uL%}pfYVCpK7Ipt*QNjQ>$ktxJ0I_9h65q5qb(cG z9f^wDS)CSBz>S@z>U&!&{2HHM5FvxGNpiF#2-%PWN=Zyhcot42kZ6a%o0gVA&K?2*#kn2)Ei||P$+%KJv?Xp^wZs@>R?%{Ao zta_6sh5xJar^hLymEL%gsl)I>?pF_Wao0N-EZ?6kd$%- zmxaJ;@R z3BJhbF;rrweHKOs+Jdtjd=iu-5X%l|RsO!SBVa6Wb=9ninNW3*B9W@2ofWAL+yG2t z>^3GMHtQtb1SLsD6sw!Jnr#x&_UDMzHU+3@SHp`I;9V^Am1(*RBK@s-fe))4Q z?^R$a*t9D0U=CJaXmy;ml)oi_YMa+6OAmj_!3|rY#XgK3^+*i5)V8l?|_u|j&Kzs ztYR{EIwCv1s9Laz?d%L&OkIs-=AJdRwJM+~G{BX&;8W4nfjt~NXbe>WaghXl7)Y#Q z4azGt-=NME*V!LGwX1B@d=4#Xl5i6v$~)g-W0FyPuq=29MjaJ9htQES@)Z=H= zcoYHhb>YJ#EkIda=4D5QY#vFOI-9j?sSX=0l8(owJ11QrZL7g2++*GM@hqP!U^VL9 z*pB^kL1y=r@xnlf_B&MiTuF_2gMB4Uq{?mnK&vXZJ(5>}mjznxp#}^PSTMAKE*8gn zVNaGntcFmv89f9#fk_sCQ-?@pqmBn7)9`)f`PI-c@V*^wC)X-*QYR`&wk~lXrS@U(k>0VMk$_Y8E(#(6VOvyN3^kA^$wV5$pWOLm26*`*q)v;uQxD z#ph_gB8Xb#AnRDe6k8?#*LxxPp-LMlMY-suUcvm-(iT&4Uijs2#NHKnYv?)|s8|WT zGK7N7Q6ZB=J*>kpSB%+TR3=!Hz--&AaES^&Mnaw;1kb>m%zc2`vKVxlA^1V2OKvIc zzrvjbb#Efw$4^)!{i)b6#RHj2?jIIR?PMI<%fFCkrZxybclmzf#O071Fmc7mcKm?~ zkW(r-f#QQo6C?=NN19<3&cAaPk_h8Nky|2_`as|aoB92~R3gvzMEfqcs5rklErZE$ z=pWRUMy@XosQ>j$!OOBT(>`)Bc6j!&Xq>O<**(zM=nFnQF=Ch8oGi6Q;Im9=am2X` zJ`(gHI}CJ6d|zCV8UeC3X@BU&E{3s>Fc}niS9BYluDMZLPyb{ed8Vx0es|u0t*}bl zDJv)eR&ZEB^Vu456UwTeK>rImPsR_qsVu_IIlJIco1mbX3PtW`3$Ph$ViOM}o=ER{ zXvAh%a@nvsia*9P63`la%}&NlvZVKaz5+FKqK$DoDISF*VBZGEykekzc=od)G&QJb zt-;}nBJfR|OwPW`d#QiHA}+&Czas{yCTs@r z_XBHpe=F@)>&6d5phan3bQ$EKjVf#$MJ98O8W&-JT3UZ1Cc=|E(BBlZ=QT-7wk!k2 zQz@Gdz?%nIher?UeV56HKM(=c zuj12W;E&DS-}F?oP&I99fJJ?Y|9N|LF=!r)pUK_T1o2l=ds$Q6tj$5Lae8rHM5YYu z)Ml?!icNHSfRH)6I#d`GA_#S$?z)I*ZGl*0a9h4huC;OJm;ntEhO&c@Mae(_YZa~y z$#FC*<@Wr{;@YFgoFFl8-X>7Ih-W%pR`Z|Q+$v%FN-fhG9&~WVx$A`nBvTP;`;xNH z8z8bw&LQa!2Fzdy=hYMxG@als{Y=RNR-3+*u{`3(wEUc~bi`sA05zpW3h6*E5U1oGfKb9rA* zRe0)(Db;flJQYDs*&h)pq&&aSJQq@dYhpLyeAGEqy#8v2<$f%gpGmoH)h+UUB2OvE zR2fs1C?!K0kF2X8YK^a6p}m#<{7H?N&FwHc%9Rv+Byl%)d!(@*phA?I2O)5#ToLO( ztt30x3q*}3gC{uO_S5uEAZw6stBilG5v=&YIEw;ertkC3r3dRSL{&#eCPr#WKLt`IIp|GR>0nsWmZ*xp3RI|eoy z{EATLjRX@h8o+KP@XN4BBTF<>jX|FvcZYJBH!n@vJQTYDEZuWvDECi%|9-lv+CjWd;(n@pLKT4R zzdW`b`@J1aUU=vuTKy<^5GPA+d=UdwPrAz31r)8xAI#DPuj|OC!Y?6&!_2&VtzuT|y^Bv>wf*{O;cN}lN%!FEZbG>> z^;6Gd$Fi>=5^l}aX!>i$&v*0n8=PuZPYVHr_Vt^p?W(>c<%s6Fk8NaHBB?f35*g1E z4??X*;Yu1mtD?YOASKZ)PJq_kJV#Qn3HK*h1P|&>uM)w7`wUuF_mO3m`CV=T1*vboPdL$wzkjUTXzGWlE3E)S78NXVlPJd`)Pi&) z*|JS#6CY>yww80RY;ME_mTd0LGA|*Z{3Gv{5`%{p+TMeONI)B#ca-4{nf@hjmChmD 
zwiBKqP3*B^g4l_Ls358Or<8z##y~L7rvJp8&mIq_)q!#bh$9&$D~sX#T5~2Vf|dzRs_)=WW(OGzSpEMG%9bV* zOeA1L65ym@@BvWc3dG7sq2xxP6r}8q7T|LT!`iT;>?RfsnGJH0kC4bFWT*)Oza15@ z&c6hOOGR7a32}ka=7wWjcoTuNNQ@==q$OweXoWv`k=55%q9M^<-2XoR@!#j@S#YoW*TguO6FC3?<1@AP$$mWm6^@#Go=Slvzdr$`! zzzbuR;D`z)@Cqv+R|-4n5T0UEcBiy6w(~P=Rf83U1USs_;HVOf^mRBklV-rC?{9i? zu`-^yAUN(imVu#hHpBjlZF}=0_y#0sO2Ci-k1yB9bK;Tg;hVQN!9=L&T3R8}A3wbb zK=uzY$;ZnN$)d2N^cN*UKtPRueC;0(qx?4ICq)4PD>fCfmD=0aCLz&3d#U`tfA{|m zPabCL)NkNyZn53!d-70dZ*FLxZgFz#fmvNGfMFOzEsf)R3;s?1*_A*oJrPLKMlBta zfG!V%k&qw-LkraWIgS5&MJ+;eM>)_ZU8ZfZEEA(Dtw87H7Hujsp)|}Y&Gf;0`+(k2 zSA7}d5v}EV6+xOA?ivOYw%IQ~=Fs4@A%+v4LvG?P9#LD-{zVZ#&*CG|9G}I!e*0b# zzp6R_hvX-T&sRo`-hmc7)%kPQM+nu2+u{XdErDyXxEkAKcIg2C4oXCmp@Vw>$X?f8K;tHbY&Rmk)tn`G0 zT$1s2LQX`(?y0&urrz!u_1G&+xEsysmI`eI_tN>5*Qrk;;xZGt>|NA$++l5tP6(a| zo;gL&Iu^iuefx;Cy<8Q#C-(mZI0MK0IJC%dC20s|n4}gKz2@spU^{R$$?+s_0VKIT z$zg!m#jGy?M}U*S2UT@q!Q)EO5X>-1le_54ju%o@H?6b(Eb!2p?Zd#~MUEv&Lo>r9 zwXo3eHMVQFZ);tGBwZ5oOWInlx^m+qRpwt`y8M#tnZ5IDE;+3d`XfdZ8{ELk|aqJ>UsF; z_BH4=dhfCK{=gnJ-&T0e?6D8M_c%&_-Cl4nNs_b~^?c?_k|b%<>3aA|@_Le=lKhb5 z<0RL#u4za~f1XHkBFR5V?rdGpkdmwTGqfb{CHXDMkF9GNLy}!dzD#m* z_B;Aw>zbBL@A(D&W|;Z4{Kn3(u7@w+C~#X<-46T=ywYCE4A!s=>Vvth6p-MCX8)tLj$ZZs7S2e}-f0e?7mT!wfUOx-n{obv=Al)#tPO zvH^Sz>}*-j(5mWWRh?>G#|Z8MPFK~Ls`?u^2t3-jra@KJG2r*sb&RE|eyytSXJ7xx z`FkSnXj|2C={>)o+YB?mIxu#Ibv=Cj$Fsm^t&7lx>;QgQv%QM{xR+Lv8-P22Z`xL} z)XcBte_?)gW{eEm)c1DWh$LSC*UfooZ&iIVTeFw*rx5o~S=$gc(qD%%XP#tL*z^*mh zJAt!}YSAT1as}r@YkyUJ)ViL{sQ3JWEtz5Fe^)2Q%&@Lsc1*GYyaGH@Re!avM;rDn zaC4FylWYT?1P(Q>N&iEV0Pg}PtLlx`^(-UvYiXHZ9T+>qx*ooOH-YWI(Im%{yw$p( zAtiY%$trLU@M)4Gt!o;>>>}eAfFqp0$nk@!I?=YOVI?^Ld800000NkvXXu0mjfUI+3X delta 52232 zcmYhjcQjnx`#wAfqDS;DlOjQg7DSH`L<=$zQHJPU^g2fg5xw^y2+0t=k1l!(qW2!% z5M>y=$MgCA)_T|S2gg2V@3ZIHce(ECp5j!((p17|TLJ+-!59a|8zN-buF^1jKj4HN zp`*KYapnB8(ml4yD(!()+&}Wr$n3}*%FRJfA9Er_LY{DGF~4+f0?EKP;x0rhg_HqL z__>v+A4|R@tE7FUNc8F#6PvjUYdcM7c!1Q}%oky`;Rlo6KV2^_YsL*H#Vgd(+SJ{P zPPRsM;u^;1w&v^(cPe*XIp`4{qK^Tpn)5@mus7K;*~XE_ozkm$6rZS9_bzB5a*U-H zEjHr2^?G>+1gyg?UVWDrST>(!EQi(+14r@_y|`grz?hm26v>@}qTSuQ2Zs|IE42~2 z4<9s}{Ym#?G8Jps<3MqYIsPh1Rte2I(abdQDY}oc#b4hvjl?a*2ZgX z9M-<%f01Ic<9jr@s7Kg7oj00UP04TYVp(b({WEB(5;MhdPW$XP@l6f`*H~|5+E}k8 z^~9g-wcm6~S?!`GbIIi2i5mL9STqVI@he-&VX>NBxg7j1zDjfYsM^8I7-^QFuUG@{ zGY97u%sgK93D!kM%o>OBQ)2=4CsWPYTagQrz3@t1SS0bK8Vvap1u*W&!e=42kOf6| zjXjk#WnrJVoz~7ntwU=o;NJsWPs@(AF!}E%oSR#DtNuK(axs&A_Atrn{3+SYt&aKW zK3p&zWZPy^dF`{r-{}!0zMFA2Jx{{LE@xc3YdeJwKV;W0SN5JBPyxuB3fztKjeCNv zN{qin2Icnjd|u;kRY{^*C5mk-2qxRjH5+;719iHPdOfyw zH8?nR%tB+!EmZiVY1i9rI=sKQm7?mCjI@4doIF`xmIQ&O?A96}8|^WfZXkb9V0`q& zbh=6a-9pj0XEWcP(Fr)1aF&NXVbuTSn&nL;7v(s+2`Bz?BsSu)A-Fks!R2J|GuI}u zqdn>O!Dtj(Dggc|JW)w!w3cFc#LT07t@O0@sKU0MRaxiO(MjyfZ)Adf2R4P2dOklk z(UiyOAr+sM8PijO%60M91s9M{GAWex}$>n&J5u6?wL>)wMf2xq9or# zyC`I;g2fCBnM8OXHd@-OgWeUZ*K^DAMXHjSoIN%5#b1CJ^3 z*=(D5b@-=XyhVS2@7+^F_1|&Em)S&Vu~OZxMIj2*b<3Oz0LU37y1wpz@bn-6OZCOp#AqKHo&dN9NBa^HUIHU$Un*diZaB`;5P@ zRbf|4PjS}0u!u!U@|1<`%{twQInCvn-P|{$voCs8)f5$6Zol?xIe0*oc~6U0af_|E zaBX!~;_Vp_s>3m~MJdX5;kf7qe)gx08jR#wa(teKIn6sZ^bY0JM-e_zErBfzatgLD z-7vbrblnW>R}LqK$ASEmp8887B-0!XB>hJ;M|3L;ke9uAm6pG$JVN$zJ|Hh}V^m+4 zVj{hhXYHQcJg0gEjP>*CdbUP8v)M9*Zdmj7TCoCrZDaLUb{}W$in>Uh2^DWH<+mKp z(yQMj&J?)UUB>aoh3yN59+64FP94)k=!VCEe=DXM0_39@@QU#od7;z`iybOBxAb+9 zPt?bzZ_kE`Zxhyw3zvgMmwr!$^o_tDrNf13-9vImvtq|RN`^ZhxP*(xm zmOWtJm+nH!yPM8sonac$_<`{1>`Q)R&FQoBwje#TRei6_%X+j|p`(h@tsoD&gj`h8 zJ@3p~QXcSNE(vKM;x|=A$@MGfe!U_Rhg{CKFju(K9O!NuI?;YKX0{3)VQ#11M2US z72K4_NQ-rDaJ+g)St0x&(EHBZ@S|;~Ubh>%trrHuMKV-=YZMGLt7Ra}+wSHMUg*-7!YVqY%O9McKIUm7%K=hQL@rpdv0{VaU! 
zGoGAza*sg1+*SIMLuVXa(bgq~QO~+Z35j zhq%AiZm_cw0EvJeS8m_PT9()vGJ@R7ZuHGv!rb~Ye*qocJ8S9=@7%3=p_MTDo%;u2 z80HtnFP%~9trk^c4xg=;m-8AKi)wpcCN^;VGfjylSF#7bRrvso!CdlXuY8*I;+b&*K<|;8mafFvoJ&Wai!q}h`jXDx zJ@SbXA8StIZVP!>b(=Dr|Eudr!oNziA#VhJ>zw*6D(X81Z=_!$m7Q!0GM@`G3lcch z%|DRPN5bC>D73SrAY|irOg_jVt)lOPvy8hpLYD4hS1BnSr9u{?4WIWGe+IYX2Pwg| za{(ZrCkQ;x|Ln&tXK{D&l21^h(777w|Bze>&VT=}?L94bw3cMl+2L%JJoDPk<4S^; z(}_05W0F4e`3*7|bdw_zs~pbie|&$hQ&!h*Z=dK3m^$nF{k0h%o}cV1Iwub6GIiee zaU`BxdHybkl{p9|NB;`N0-xkHi4~xl?+OY(v2iX$nRc2|Il7nkcH*Y0gWfqV;XKghp&2oLs+&HF3Q(qc~t-9@Zblo_2J zgSIqxwz_V86vs`%Eoq}u^~ge|dwAw1m=3V3q$I$x4}HV@>{GHpKbNE1%zGC%e^M1G zyy)h~Xn*IvOy{OY)UZ>Ob&GxfJVESK-;stuv3@{)TjO=CHYZC=*w=dGGY?qE?Ld1= zl;5|H3-AGHd#BFhAC}tP_b<@J?8A0Uj3KoFW!@ zA_Zg642|hAmYdJgW}ec%DtX%XZ$kE{c-{?v-1l5U83PyFKg1p-JqUZuZtZ6mFD2VX zSd=E6G})ig>2TKo9j^bN={t~9BXBQ6bZHa4yx|?& zLeCj|VYhv=-11hZ5fCuBAF__D`lX?1h)yT?IlZrCcEiAK_l>C>a@8B%v(%p($XBhe zUX(*U8iLydZ9C<&y1SRVQh+MTmj2Kt*nGk?oR|e^^MU(-Cq-&A|6zeaWNy#fvr;u! zE$?C@7_K@I&JP@|M_~`&A=$)v8egjFw)2Aai8NrY!J`~lt8G8*F&`77F#5G@^7xOS6v3YcZ1Hy~>}}Y`b$ju^W1#kK#^;GReYvot z_uZXi9B2K2@XIl(P$vS*c#{(Xow}6ljHnmX61$P(Esx$_z4#^ZSC84GW1u)$?T{de z=6KgN4Cy8P9lBa>IqBgkhdhGVzcJyH@MB@k*rTJN&u|lA8=N7NV(YdkW5XPXDF-Vk zBU_AuSF1&tx2#W>b(gJAZ&NJy@QVfDNT~;-PEx-B?Ct{Q%;2u{N$_0?LuBgr9@I^< z%u@Z0b_V#-XH}IiL!M`&%V#60(#8@Y# zK%2dpSp6y}Uqva1ym8NCpU+CdVl8THL z&-qLo^1kSer%UCD6oG!&hCHP69uxR0rZqmkMC3VI>T^dc9Sjid5MLqbLZ1ZL==UB?eU@sRvC4m#+ZW zpXw+ua>2?l8&kiMjt5zF{1 zCc3FAeU;y{FV1sB7yEVnLb6awb?L8cmJ42TZU^W9}V|8OONK34LX%%~Iq+Zuk-w5O0lc) z!%qfHs;slHdZsC>wYn0kn_{oM^)-H7+MPb&fliUP&`v#=EnU+#JvK=rX^M*^xapBFX zh7P}iBo&637|pQuS=3uP?DCBHShAFF&i96s!3WGb_5&U~*|^8KI)ll`pEE)f+=~Pe#h#V)Et?ob{~J<54au zq&mc5Z0255vtS`(;c2SBdfh*EC+<_M@8EmT*{K|LGYFw26>f1O+ij<+4K>5PRBB}a*9s0(FU1j zyk%HV(sOa~K(4yx4#P4uz-#D}lI7jQxOHJt%=#GQ_Ii@yR-(;kqT|k#l>?i>9I?W8{*DgSc@L=BLU7?GrX*vO+eSo~A;o{Gb% zGb;Me-!{#@KTJB9`%n&iX~KFUF=cqBMAwHs!d-ly1tk1p#$&*ygKI>nIlBp~*u(Hs zmA%*8EzZ-4ZK1>D!pob5X8_BKZt@}oFqoF#-Bb&iuABLajiuaj?!n-8aP{inw1;mf z0EbcC06u{Ay<0tdLZanN?}%$hWH~g?LM`Vi5~oHk{r}*eiQeUEd=*~i%iFl1eqtAW ze`0*?I4|5m?IrSPlbYxGWeBrU z=P}8UM4~cKE3z@v-5Z6~KfJ7t@G*h6UU*ZbA;2}e&t3_DSqX2`Uv@~qHiT4o&il^M z`b@@35-OX3*i!83kTZ5g#~LXG@+i#1A3;hv=P5mY}B2*LGdTl@KKoZQ0r#Nc;8R+3du^SHzYMXZ^#X=uy^`IDNtR(Xwu{*A^H&5Ja z>3#$jqRYsg?G?Rg*VFF}tCjXI!gYG+(5ROv2g$I*mw z+GY>$VO?^ul@~?MOQLM7`3YOLss{y+28w4g*t!?olMsOoZb9yxE?I+S6Riom=QVG) zJcj$-;^WgiR`RuY#a<&_pDyNWDI=wtE{9uWipAP^n?f!0+mld@H}4VTP%YhCFoVz1 zbDTeMd}h=FhXMHAeyV=|>VrXQFsC+DTacAkw-_k&Tr&?bv{ z#+l@jr-w6YMvy#1V`9W#FWtg%Eu4tpPp11Pvho0b%KWsZ5g>l)hckTeaNubpW2xXa(2F35q-r!zC{UwWTt#* zBnbTKB607(-m>*GA6X7pb@-Gz(m+Cta_!TKXdb&%6KEaxGIYLO#H7u*NgrCrv5P1t z!~m&HA^Q1xfWTkd&`w_pz9Y`1TGt*@C%c-lVqd}T5+7uxZ7rWJi|u~kBh6dkUHpuF zPkM`&QbGj~Dt@zs^P>0;UjhzO<(2YrrrO)Bo%HRq1C`FbR#S7T@-0-nvONrf8uLVc zEX4e<4@#EyehLq4NZN>#$Gw@aem~5%I~-Nw?#~u-2!80YvoD#=snV(K{xvQm zos*X9$t!IYkT^cxJ#C8)AkZHgIOk{J2gD;;6o`0$+W% zDEuBUt$phW3Ea&HL)N70ht! 
z>qQL9kS&OKm#VvSGzU*?I#-f_c*egwP*Qof`Sa>qgdv&lvwVs1`$D;HfE7IP9lc_e z4hx8-g^;m(aOm>oJ(9feL02u!-Cl={81#R)aDQ2!*m0X`*(xQ1D-DY_gGhQaa+tc)E@J?IJ3VQ;XFTKDwPv2st#`g(j2v0)j;_`ZfC{` zghR(ZBX)V2KGL(HBshpHWX_eYwnkAJBt$yA`5b4jkf{$r-VmFq*v4w7SETmFg7U;2 z|5xC1k?tfM!eyd?7=Qo+_&%KD4P3ew`OBqgv4_O9g$v&O}sBu zEUPjcZT0GzKl*H{y<4WxS=(^8t@&}?yJ*Bt)Lgu6DUi6L)46NPlW|8y^B0*&n8&Ge z0ZE+@AHokL%DKgPd(2{9r~V0O=RNzKA6MB7?jT6qkEyYf8psjauVufWwxdEsy}+xD z%xh^dYl!DrG4EF+x(Zw!cM+wlW&=pmIia0+YR$UMpQtWw3#D|tGh%xQ?i}(6-$S0~3Om1Do5RI#^p;qe zLWx3j-VB(Xw2j9V6`C8(vxBAAWnY_h0{KJ0X6Tc&T1jyP7-id!`nXoJ%tFqLmkAYO zr=!4J_#MX?@#a7++m02-Fxg4@;Fr&@`^>;Q+CtR-Jj^m1p|iJyAysvKx_Y7<-kXZau55Ol~?Y6kip5$D!ZbN;N0zV#EO^TAhQ674~%pMte?R)${1H_|vz+{i@ix ztY!YXG?2U@aHMeteSi1PqyPYIIv505cJNg`O0l&a6wFY>N8yW3mlaB~!}xRNa@;1L zmYt|-nSs_!Le3&?RfmLS&$iP6f1Y&}W4`bwU@sFtgL$yF?+?osGUhr~f=GxW6xXZw zoV@R`|83o}y36n}i@LT?U6#cAS%@SRb<3e-kCrrOUwD6fQPc(=$b)&c=9_X;)g2n5 zu<|<$nTDa9PcZg4r>cbEdq5Rn2;U_Nl^%=w8qI(Z)&dTU^`{menk4-_qzB{fc z+u0qJXFri_QNhaF)8OO4*sy$UoLskpj)=j&|0MTuSXiw*M)Hn(R+ugF!yid__QULp zv`mHJ@?iZGuMJY`dFMwvdJd)+O}NvNb&%&lQ|dSZD`DkXTtc~)m|yZA@32^|?_hlA zaNR%e&ha2tj*f|WrPs&Ws`_aLALGRoKBf;#N(OVlAjsVV8?fs0dXl%d__k4dlDPFJ z?g22F$e?a8f1F~!_J>dUuZ{J3;z<0YkSTrMG^_gx+(`aN|JY^s?qS!_-Hd{Ccn9(2 zCO;M!7&Kfv4cleM;AGWy>9cifwqUtZaynRwk2%6ivgW?*8Frb)`2490Lah!f!EV585q*CNb+p%fG@DClkLfh;SKr zQKjrr9EW@QdRt98#%!q-u-6;BlBiC%qhbSa4UPYP4W5o8yq*rUFgpLDe3Q&(J+>gS zRb7+4>x+#pUI#3G@#3 zy=WVwNjOt$D8F_Y{o~}Y`H|<0H`?t(1v(hxN|t)#D`+;?em96}Z<~X|1W3SJl_d@b z)s3b#GWgtt!UZ~#Khh!ca)HN1Uo8{(jzOnRjbY3a@M~ad~Vv{K*r`! zej$STjhc5;RaaFL{ZBd=P5eI4BcKO&s6FLiP-@TKJCW0OpiT5Kh|n(QW$}Cg#m#@! zO`&;+tMB0Ok!Z7Ifjq_zbnC@`fj4_wDex8bbf-_XR=gbO2|3;c7u5I9l*v3i#c6pH zt2GLiJzof=S*gK1mdjpFrMc0gADO3?k_&avw!MzPYAv9l2SU`frlA8?s!M~0==iny ziJywje|X!#&0i~NPYUtTfFXr?ibtXrB==s8isI!%IA~(r*_V8hLb{xx2Q$GA7yCEW zjr&>}fFlu=jnf8-(Ui}5f^DRSTa%r3H0CCKe<>x_RN(U>b8+9TopIji5FZ*b5|3lf zPJ75x$bJsmwSN8SgJQR;=w8ZbJ|2EI?LOMn9&(<(;`5AIbF5i7(`fFz8*Pj@R8aIv zWZ!;7}r%XY=OFSd#s5!>d;R=))M7`4fl-&sG0H<9=_$9JarYcCg9;{lQx zhm1nh%+~8PZn)g*oe%kM=WT<$g>niDe`fKFy!j6fDJ_MkfSWPD4q^s-l1Dz#5a{DH%iYb+QBGvbUp2G51}-HFY*5XCj>-;!(N>Aj`J^{ z$av=chb|!kU2xXi>Lzm%Z{TF)|K>0S=ZNX)9qx-kIuQ{3hxBBo4iK6q4ag12x@Wcp zcfb{E_g-J|-QVHbM*|$C`DGw}g??Pm`$m38vfekM-98w^K9f0^#)RPFLI7M;h$KIB zo;C&*$a-Q(jr|f*&kN0D9Tbm9c?f;prk}ankn&!%o%ZwpKspN8AWqglr3r+*zVNHhq59hE*Y~8KXKUi+VDw zy%tZ*^$oRk)T!sHcKl+^1oBlXi=?;WHNan zGB+8yM_5Fpu00yKqlMF6xIrzh4(stN_v`F*HZ*yl<=Zbw^h^)@@U?Fw8h0eW2N?H~ zi;tT17nrEA-$HyyWMTASUD^&&WJPa3O;07^6tx48E5(bbCHlKuBd47b?KPlcTXK zehls@rY|QJzL{D2M}}j@Rw#^eVeT{39-LX_sR zZ(AraSnDc0?g0DJt+@e&ut}fG>mI59yIozG0W`iE&;uFhL4AmiHVhRyoA>!1l;d^` z1MJ@Im}*+I6pb=$jtAOA6O*K{64!1&2&0K94pBmf;PNKyQpllYs<1|WD1XNl@PePm zEiF>yTL$7)9g{iMbvtdFoM&T+%!A0|3#t+CRLW5;W?Ey*^sU zj7!x}{79m$hIb}v4e~#nBrnHtQ#6FN_@spz@+41V#(r_Z&95BSxuJz4enN#dYR zfN6ZE?Ymt8B^bAVF+oy+wR`Eb&}_UHvL*aFProgw%jDL_fIyuU$xHD=86X9IaYu~$ zI?Ilk#V`^4UeZPcsW8Kj$7>HiD={;?OM@S=fcx(9dJ{`1BnUDmFMpYuzg50=3McqF z1o=NU{ohx2h69rt{xq-^G3rMI1NiLV+xJo5qQ<-A*w024@wjD35{r;_UBSq*?3=V1^pEmJ`P6pMB>ufD1LCqXjEWT zkSoIacQ9s!6^F3dJd$I|1<3{e<;(`qSN~|Y$NwV`w^V2V{0x~vY`a~fvlE}E5d?@l zYzk(o-eSV=xr?gfzzg1>0OwEr=yo9sJ5$Kjfq}=N9C~o}SvpFfgEcY)mlA?|IFPMQ zjr|&eXxzygo>cv zk^F%GG7xrNd*^Tgp@@(^f(*zm5z7C5bH2ZCO+>}jV97FqxSoDOYkxoe6jM({FwIpu zY0eC~tCb6y{S&B`$W$EEDJf)|KE|FJ3$CO%U|sq?l&T#8D8k0by?2UXD}k7C;2w1F zmrf*jCYqR3mX|Ju**gf|UHWR=j=8!G#PCsDQO6u%UQ~s2NkN@DOWbf_A^PkduLas9#Jd5#rqCceWjgCk!GkJm8n7vj4%&68LM*a}eDOgfWS z>+EN-WJ2HG>8lA-m(*Wg%HCvM=p>~2)J!M)*xqC=&OfSS(C)?nJvBel*A9nEJS_$A zkU{7GDW)2I*c?sMG2a3o{0nhCzQ>B=j!;K@#XP~+5L?<9ZJ4QCi!&Ael?p78;1~A5 
z_yzqNDm;tNA^gzd7Q58Tc|iNOn-B%)c1dS%`sl;xDS_v34p5>8r+9rtnZ@dH^l%TO zwh#d=gI->;F0i1UK??ERzHjO*pJ2uJT^GXTD>lfW!-W6vZ5$0Quum^k;Q(vXIORfb z^HpT#ImvEfb2nzu0#D}E=+gxa{~**E6TBa_`E;T2l+llM|0Ol>13qd9f6RLdA|M>) zMgR~!B&NCxZi(}?ChKleqi=E8BC>yYK%>m$*c4-}cK zm+2vebTJIeEqMmbKKfB3`zL=}g47$M#mOdEfd+gJ8=tfi9%QDo-YUb<-%cg{(8ZcR(AvaJ@^8b+uuOKK2`I((>Rx>STAxuFG= zL((RF=OT!lX!3R?AgAwQ5tGDecLT{<2p+Eh-xIaC6w0O-`D4S z2RVj3qOPS*b`PocBppZpV=^j0y}&GMK#Fh29K!CgU7d}1;M$Da+h=V28Azu_DgK3a zap1{^ioxxEMZmppoz|g_+chq3{H2d6P%wQXNwn+=J;=v+4$#F#m$Cu`-#3v90t*Gr z>TqBCij&V#=PE|6NoxX~^gNAymeK^Y$NwZLd{{*Px^VrD+LB_1OEzw?)Ra;ncs%Nf z$8B8cf0I`%c_sEx4J967o*^1)pqpOs3pd=%{4!D1@j{2OE!*l5i~qQG2`mT|+3hwvMF8Mzz}q3dFsD&3#5+)Rbir4}iAu5%U&S@V*LcxF*PCue!DO+AUva zOv)IsGQKST7A9rTQ>zY5+9`r|v;&Mh{?B7;16`r~|GN~=?M6Up)~n$*DWz#*9$l{S z4p|c99VMQK=;-4uMis2|^PoE8jYsNEH+QH)XK$E| zgNY`8?-P^0*J-~65YV#Qr!)&qpQI{3e)YzC+7VS5e!!jtNrI&RA2}+YshE-(`F&r` z^emzu4nLEh0Fn-0H{yV-;bm#!fsC zWkKFRF_DkDYcC#&@fi`VIe_J@J|;BjQ_@cPzdNQQ_aFlV$j8v8a1J#6TgogH0zU&^ zozReo62g(l+$ELSkf`_6_FGk>zec(%sl$1gd-)PwF`wH=xPW4dV4q-L@(*)w z3HGn^7iSE@ya?sCGP>LmGu)oG`g&92l}l-w8Mqtonaou&YFj%&9b09GGR=*aQ}N2t-9BGcly79xc_($aqhUq%GUtx*sLR=#pT{O z29Vib|1bSxrS9c733YfL3iiH)hE#-gIZz(pGQ+y0sj+6bt&Y!P4yCx!4WO<61o`ti zu~dbC<3S96&nPDLJ7Hf-z5=;{jwWd#x%J1P<4vnOeP5cg{u>xhTmphr^()CEFm=<- za)!*0aJRej2LtPXBV zlq0oggxTcfr`03@^Hx1lo*#jTWpWzuU zAyX5NkUUH_kYYB8jK99~xtcOq2>@nMQ!Ky*Z8Uw%R3Ix_4nCiaa`0TYvWPmLxjG(9 z^St6&{8*=3f0ZyXnj^3l@)pd-9!$m4f`^PBdK>W7@|lb5mUg&AVcHxrsG2{`O9Ib} z*c}=+wTi^t=^`X9n`^n6KJWRonNu=pqj;M@CdKag(=qy($IY1pmC#|}=FE*;yw^u6 z%}mYTptm8u&AKQQ&4*q2ZiL9#>pZ*@@5@aL0SES%X49TX^}K|pns5hg%JuqDYD(bx zKW!s)Mvt1$K8uaVoE~Xh23}ciSo9u7$g>wu9day_@|&n`{I};3;2PPvj&Ht^NNzPaI%0azMLo10^Y9J`u^2;<8NXImRf zK|rNlt@%ZwK!f7fwg%8ybns-#1YrpSnVu%HmXW71ffR(ohpp6dtEpOFC0PYcB`>7Z zQNDq)bruI^Cb-p3uQwqczuY?NkJbWOA#)^<=iiOaC|bANb_g))lq!8nu8-1n>wH z)+!^;`Eb}U%k!yGVSPug$v!@e_?le? 
[GIT binary patch data (base85) omitted]
diff --git a/docs/_site/site/user_guide/plotting/plot_decision_regions_files/plot_decision_regions_25_0.png b/docs/_site/site/user_guide/plotting/plot_decision_regions_files/plot_decision_regions_25_0.png
index 21b5fa4fa94d5e76b18a3965188e139a9b74e368..eac930bbf8d82646f1d644b425668bebd46b0515 100644
GIT binary patch
literal 8234
[GIT binary patch data (base85) omitted]
literal 8462
[GIT binary patch data (base85) omitted]
diff --git a/docs/_site/site/user_guide/plotting/plot_decision_regions_files/plot_decision_regions_27_0.png b/docs/_site/site/user_guide/plotting/plot_decision_regions_files/plot_decision_regions_27_0.png
index 3d6161a67f8a61d76c3a8a3929094ead280bb150..b7a234b12d60d6007d067aff1caf474ccd60abdd 100644
GIT binary patch
delta 24
[GIT binary patch data (base85) omitted]
+
Mlxtend.image
+
+
+
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,11 +930,11 @@
    @@ -1035,7 +1043,7 @@

    API

diff --git a/docs/_site/site/user_guide/plotting/plot_linear_regression/index.html b/docs/_site/site/user_guide/plotting/plot_linear_regression/index.html
index 16a0a2abc..60bc6d213 100644
--- a/docs/_site/site/user_guide/plotting/plot_linear_regression/index.html
+++ b/docs/_site/site/user_guide/plotting/plot_linear_regression/index.html
@@ -805,6 +805,12 @@
+
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1008,7 +1016,7 @@

    API

diff --git a/docs/_site/site/user_guide/plotting/plot_sequential_feature_selection/index.html b/docs/_site/site/user_guide/plotting/plot_sequential_feature_selection/index.html
index e6825f019..27f877533 100644
--- a/docs/_site/site/user_guide/plotting/plot_sequential_feature_selection/index.html
+++ b/docs/_site/site/user_guide/plotting/plot_sequential_feature_selection/index.html
@@ -805,6 +805,12 @@
+
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -923,7 +931,7 @@
  • Plot Sequential Feature Selection
  • Overview
  • Example 1 - Plotting the results from SequentialFeatureSelector
  • - +
  • API
  • @@ -1022,7 +1030,7 @@

    API

diff --git a/docs/_site/site/user_guide/plotting/scatterplotmatrix/index.html b/docs/_site/site/user_guide/plotting/scatterplotmatrix/index.html
index 3cadb877a..a11d65541 100644
--- a/docs/_site/site/user_guide/plotting/scatterplotmatrix/index.html
+++ b/docs/_site/site/user_guide/plotting/scatterplotmatrix/index.html
@@ -805,6 +805,12 @@
+
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,14 +930,14 @@
    @@ -1025,7 +1033,7 @@

    API

diff --git a/docs/_site/site/user_guide/plotting/stacked_barplot/index.html b/docs/_site/site/user_guide/plotting/stacked_barplot/index.html
index 322de67fd..35c448ff8 100644
--- a/docs/_site/site/user_guide/plotting/stacked_barplot/index.html
+++ b/docs/_site/site/user_guide/plotting/stacked_barplot/index.html
@@ -805,6 +805,12 @@
+
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -1053,7 +1061,7 @@

    API

diff --git a/docs/_site/site/user_guide/preprocessing/CopyTransformer/index.html b/docs/_site/site/user_guide/preprocessing/CopyTransformer/index.html
index e6bf55a12..18ac3a23f 100644
--- a/docs/_site/site/user_guide/preprocessing/CopyTransformer/index.html
+++ b/docs/_site/site/user_guide/preprocessing/CopyTransformer/index.html
@@ -805,6 +805,12 @@
+
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,10 +930,10 @@
    @@ -1086,7 +1094,7 @@

    Methods

diff --git a/docs/_site/site/user_guide/preprocessing/DenseTransformer/index.html b/docs/_site/site/user_guide/preprocessing/DenseTransformer/index.html
index b4c0bc9a2..ddad89c52 100644
--- a/docs/_site/site/user_guide/preprocessing/DenseTransformer/index.html
+++ b/docs/_site/site/user_guide/preprocessing/DenseTransformer/index.html
@@ -805,6 +805,12 @@
+
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,10 +930,10 @@
    @@ -1086,7 +1094,7 @@

    Methods

diff --git a/docs/_site/site/user_guide/preprocessing/MeanCenterer/index.html b/docs/_site/site/user_guide/preprocessing/MeanCenterer/index.html
index 5c4eeb6a1..4c94a3614 100644
--- a/docs/_site/site/user_guide/preprocessing/MeanCenterer/index.html
+++ b/docs/_site/site/user_guide/preprocessing/MeanCenterer/index.html
@@ -805,6 +805,12 @@
+
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,10 +930,10 @@
    @@ -1025,7 +1033,7 @@

    Methods

diff --git a/docs/_site/site/user_guide/preprocessing/TransactionEncoder/index.html b/docs/_site/site/user_guide/preprocessing/TransactionEncoder/index.html
index 260c38d82..dc7a68955 100644
--- a/docs/_site/site/user_guide/preprocessing/TransactionEncoder/index.html
+++ b/docs/_site/site/user_guide/preprocessing/TransactionEncoder/index.html
@@ -805,6 +805,12 @@
+
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -925,7 +933,7 @@
  • Example 1
  • API
  • Methods
  • - +
    @@ -1273,7 +1281,7 @@

    Methods

diff --git a/docs/_site/site/user_guide/preprocessing/minmax_scaling/index.html b/docs/_site/site/user_guide/preprocessing/minmax_scaling/index.html
index 889ec7848..e5af41419 100644
--- a/docs/_site/site/user_guide/preprocessing/minmax_scaling/index.html
+++ b/docs/_site/site/user_guide/preprocessing/minmax_scaling/index.html
@@ -805,6 +805,12 @@
+
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,11 +930,11 @@
    @@ -1123,7 +1131,7 @@

    API

diff --git a/docs/_site/site/user_guide/preprocessing/one-hot_encoding/index.html b/docs/_site/site/user_guide/preprocessing/one-hot_encoding/index.html
index b1476eb6f..aa66676fe 100644
--- a/docs/_site/site/user_guide/preprocessing/one-hot_encoding/index.html
+++ b/docs/_site/site/user_guide/preprocessing/one-hot_encoding/index.html
@@ -805,6 +805,12 @@
+
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -1044,7 +1052,7 @@

    API

diff --git a/docs/_site/site/user_guide/preprocessing/shuffle_arrays_unison/index.html b/docs/_site/site/user_guide/preprocessing/shuffle_arrays_unison/index.html
index 6992e5b96..6adfcc02c 100644
--- a/docs/_site/site/user_guide/preprocessing/shuffle_arrays_unison/index.html
+++ b/docs/_site/site/user_guide/preprocessing/shuffle_arrays_unison/index.html
@@ -805,6 +805,12 @@
+
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,9 +930,9 @@
    @@ -999,7 +1007,7 @@

    API

diff --git a/docs/_site/site/user_guide/preprocessing/standardize/index.html b/docs/_site/site/user_guide/preprocessing/standardize/index.html
index e8b69d313..d14a67f0e 100644
--- a/docs/_site/site/user_guide/preprocessing/standardize/index.html
+++ b/docs/_site/site/user_guide/preprocessing/standardize/index.html
@@ -805,6 +805,12 @@
+
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,15 +930,15 @@
    @@ -1177,7 +1185,7 @@

    API

diff --git a/docs/_site/site/user_guide/regressor/LinearRegression.ipynb b/docs/_site/site/user_guide/regressor/LinearRegression.ipynb
index 8666bdc84..b102dd316 100644
--- a/docs/_site/site/user_guide/regressor/LinearRegression.ipynb
+++ b/docs/_site/site/user_guide/regressor/LinearRegression.ipynb
@@ -1,16 +1,5 @@
 {
  "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "%matplotlib inline"
-   ]
-  },
  {
   "cell_type": "markdown",
   "metadata": {},
@@ -149,12 +138,14 @@
   },
   {
    "data": {
-    "image/png": "[base64-encoded PNG output omitted]",
+    "image/png": "[base64-encoded PNG output omitted]",
mKnjFklHmzdDx47ws5/Bhx/CkCEwfLhCWwB13CLpZ8oUP0Fh1SpfUPPYY1C7dtxVSRpRxy2SLj77DG64Adq08V2mpkzxG5AKbfkWBbdI3EKAV17xm40DB8J998HixXDWWXFXJmlKQyUicfpmxeOoUVBQ4CfSNG4cd1WS5tRxi8Rh507o2dNPpJk8Gbp1gzfeUGhLQtRxi6TasmU+xW/OHDj3XOjdG/7jP+KuSjKIOm6RVNm6Ff78Zzj5ZHj7bXjxRT+ZRqEtVaSOWyQVZs3yLnvFCrjmGh8a0aHasp/UcYtEaeNGv/l4+ulQUeEd9osvKrTlgCi4RaIycqTffOzTB+66y0+mOe+8uKuSLKDgFkm29evhl7/0vUUOP9xvQnbr5vtmiySBglskWUKA/v19Ic3IkX4aTWkpNGsWd2WSZXRzUiQZ3nnH98qeOhVatYKiIvjJT+KuSrKUOm6RA7FtG3TpAiedBPPne2BPmaLQlkip4xbZXyUlvnvfokVw5ZV+5uNRR8VdleQAddwiVbVpE9x9N5x6KpSX+z7ZQ4YotCVl1HGLVMX48XDLLbBmjT926QLf/37cVUmOUcctkohPPoHrrvN52DVq+JmPvXsrtCUWCm6RvQkBiot9it/gwfC//+vnQJ5xRtyVSQ7TUInInpSV+XDI3//u49n9+vlJ6yIxU8ct8m07dkD37tCwIcycCU895ZtEKbQlTajjFvlXixf7FL8334QLLvBx7OOOi7sqkV2o4xYB2LIFOnWCU07xGSN/+xuMHq3QlrSkjltk2jRo184PN/jNb6BrV/jBD+KuSmSP1HFL7vr8cw/sM8/0pesTJ8Jzzym0Je0puCU3DRvmU/z694d77vG9ss8+O+6qRBKioRLJLR9+CLfdBiNG+NmPY8ZA06ZxVyVSJeq4JTfs3Okn0TRo4POyH3sM5s1TaEtGUsct2W/lSj+od+ZMaNMG+vaF//zPuKsS2W/77LjNrIaZzTOzRWa2zMweSkVhIgfs66/h4YehcWNYtsxvPE6cqNCWjJfIUMlWoHUIoTHQBDjPzH4ebVkiB+iNN3wY5IEH4IorYMUKn+pntsvLioshPx+qVfPH4uI4ihWpmn0Gd3BfVT49qPJfiLQqkf315Zfw+9/DaafBF1/AqFG+mObII7/z0uJinw1YVuZ7SZWV+XOFt6S7hG5Omll1M1sIbAAmhhDmRluWyH4YM8b3F3nmGZ85snw5XHTRHl/eqRNUVOx6raLCr4uks4SCO4SwI4TQBDgGaGZm39ltx8zamVmJmZWUl5cnu06RPduwAa6+2kP6sMN8Q6innoKaNff6Z2vXVu26SLqo0nTAEMLnwOvAebv5XVEIoSCEUFC3bt0klSeyFyHAwIG+kGbYMPjLX/zA3ubNE/rzPW1Dou1JJN0lMqukrpnVqvz534GzgZVRFyayV6tWQdu2fsOxQQNYuNAPOTj44ITfonNnyMvb9Vpenl8XSWeJdNxHAVPNbDHwJj7GPTraskT2YPt23wSqUSOYO9e3XZ02zbvuKioshKIiqFfPJ5vUq+fPCwsjqFskiSyE5E8QKSgoCCUlJUl/X8lx8+f7Qpr58+HSS6FnT/jRj+KuSiQpzKw0hFCQyGu15F3SX0UF3HsvNGsG69bBkCEwfLhCW3KWlrxLeps82SdXr17t3fajj0Lt2nFXJRIrddySnj77DH77W99qtXp1mDrVB6AV2iIKbkkzIcDLL/vNxpdegj/9CRYt8sMORATQUImkk7Vr4dZbfQXkz34GEyb4BlEisgt13BK/HTt8mXrDhj4k8uSTMGeOQltkD9RxS7yWLYMbb/Td/M491w87yM+PuyqRtKaOW+KxdSs8+KAfH/bOOz6ePW6cQlskAeq4JfVmzvSpfStXwjXXQLduoP1tRBKmjltS54sv/ObjGWfA5s1+9uOLLyq0RapIwS2p8dprvhlU375w112wdKmPaYtIlSm4JVoffQS/+AVcdhnUqeM3Ibt1g0MPjbsykYyl4JZohAD9+vlCmlGj4JFHoKTE52eLyAHRzUlJvrff9v1Fpk3zFY99+8JPfhJ3VSJZQx23JM+2bfDXv8JJJ/nBBv36wZQpCm2RJFPHLcnx5pu+kGbxYvjv//YzH486Ku6qRLKSOm45MJs2wR/+AD//OXzyCYwYAa++qtAWiZA6btl/48fDLbfAmjXQvr0Pk3z/+3FXJZL11HFL1X3yCVx7LZx3HtSoATNmQK9eCm2RFFFwS+JC8D1FTjjB98x+4AG/CXn66XFXJpJTNFQiiVmzxodFxo/38ex+/XwbVhFJOXXcsnc7dvj+2A0bwqxZ8PTTvkmUQlskNuq4Zc8WLfJd/N58Ey66yMexjz027qpEcp46bvmuzZv9rMeCAigrg8GDYeRIhbZImlDHLbt6/XVfrv7OO37KeteucPjhcVclIv9CHbe4f/zDh0XOOsvHtSdNggEDFNoiaUjBnetCgCFDfK/s556De++FJUugTZu4KxORPdBQSS778EPo0MEPOWjaFMaO9TMgRSStqePORTt3Qu/e3mVPmACPPw5z5yq0RTKEOu5cs2KFj2XPmgVnn+17ZR9/fNxViUgVqOPOFV9/DX/5CzRpAsuXw/PPe7et0BbJOOq4c8GcOd5lL1sGV18N3bvDEUfEXZWI7Cd13Nnsyy/h9tuhRQvYuBFGj4ZBgxTaIhlOwZ2tRo/2m489e3p4L1sGF14Yd1UikgQK7mzz8cfwq1/BxRf7/tizZ0OPHlCzZtyViUiSKLizRQi+gKZ+fRg+HB5+GObP9y1YRSSr6OZkNli1Cm6+GSZPhjPOgKIiP+xARLKSOu5Mtn07PPYYNGrkW6/26eObRCm0RbKaOu5MNX8+3HgjLFgAl10GzzwDP/pR3FWJSArss+M2s2PNbKqZrTCzZWZ2RyoKkz2oqIB77oFmzeCjj2DoUB/TVmiL5IxEhkq2A3eHEOoDPwc6mFmDaMvKbcXFkJ8P1ar5Y3Fx5S8mTfJhka5d4YYbfPn6FVfEWKmIxGGfwR1C+CiEML/y5y+BFYDau4gUF/s5BmVlPlGkrAw63vQpq1r+Bs45B6pX93Hsvn2hVq24yxWRGFTp5qSZ5QMnA3OjKEagUycfDXGBqxhM6eb6HDej2I8TW7wYWrWKs0QRiVnCwW1mhwJDgTtDCBt38/t2ZlZiZiXl5eXJrDGnrF3rj8eyllFczGCuZg35FFAKnTtDjRrxFigisUsouM3sIDy0i0MIw3b3mhBCUQihIIRQULdu3WTWmFPyj93BbTzNchpwJq9zJ0/SnDl8Ue+kuEsTkTSxz+mAZmZAf2BFCKFb9CXlsKVLmXfQjdRhLuM4j/b0pox88vK82RYRgcQ67hbAtUBrM1tY+e+CiOvKLVu2wAMPwMknU+eLVcy6tZj2x41lreVTr54vhCwsjLtIEUkX++y4QwgzAUtBLblpxgzfK/utt+Daa6FbN1rUqcOannEXJiLpSkve4/LFF3DLLdCyJWzdCuPHwwsvQJ06cVcmImlOwR2HESN8r+xnn4W774alS6Ft27irEpEMoeB
OpXXr4Mor4fLLoW5dP1m9a1c45JC4KxORDKLgToWdO727btAAxo6Fv/7Vd/MrKIi7MhHJQNodMGpvveVr2KdPhzPP9Cki//VfcVclIhlMHXdUtm2DRx6Bxo19mXq/fjBlikJbRA6YOu4ozJvne2UvWQK/+AU89RT88IdxVyUiWUIddzJ99RXcdZef8/jZZ/Daa/DKKwptEUkqddzJMm4ctG/v+7DeeqvfgDzssLirEpEspI77QJWXwzXXwAUXQF4ezJwJPXsqtEUkMgru/RUCvPgi1K/vwyEPPujnP7ZoEXdlIpLlNFSyP957D26+GSZOhObNfY52w4ZxVyUiOUIdd1Vs3w7dusGJJ8KcOX6y+syZCm0RSSl13IlauNCn+JWWwkUXQa9ecOyxcVclIjlIHfe+bN4M993ny9Pffx9efhlGjlRoi0hs1HHvzdSpvlz93Xfhd7+Dxx+Hww+PuyoRyXHquHfnH//wYZHWrX32yKRJ0L+/QltE0oKC+1+FAK++6lP8nn8e/ud/fJ+RNm3irkxE5P9pqOQbH3wAHTr4+HXTpr4S8uST465KROQ71HHv3OkzRBo08HnZXbv6AQcKbRFJU7ndcS9f7gf1zp4N55wDffrA8cfHXZWIyF7lZse9dSs89BA0aQIrV8LAgX5Yr0JbRDJA7nXcs2d7l718Ofz61/Dkk3DEEXFXJSKSsNzpuDduhNtug9NPhy+/hDFjoLhYoS0iGSc3gnvUKL/52KsX3H47LFvm27CKiGSg7A7u9evhqqvgkkugdm3fGKpHD6hZM+7KRET2W3YGdwgwYIAvpBkxAh5+2DeHOvXUuCsTETlg2Xdz8t13fa/sKVPgjDOgqAhOOCHuqkREkiZ7Ou5t2+DRR6FRIygp8TnZr7+u0BaRrJMdHXdpqW8KtXAhXH65H3Bw9NFxVyUiEonM7rg3bYI//hGaNYOPP4ahQ2HYMIW2iGS1zO24J070sez33vM9sx99FGrVirsqEZHIZV7H/emncP310LYtHHQQTJsGffsqtEUkZ2ROcIcAgwb5FL9Bg6BTJ1i0CFq2jLsyEZGUyoyhkrIyaN/e98hu1gwmT/bZIyIiOSi9O+4dO3ylY8OGMH06dO/um0QptEUkh6Vvx71kiU/xmzcPzj8feveGevXirkpEJHbp13Fv2QL33+/Hh61e7Tv4jRmj0BYRqZReHfesWfC738Hbb8N118ETT0CdOnFXJSKSVvbZcZvZADPbYGZLoyykuBhuvnwD7739NdceMZ7itgMV2iIiu5FIx/088AzwQlRFFBf7GpqKissZyPls3VCDYe38d4WFUX2qiEhm2mfHHUKYDnwWZRGdOkFFhf+8lRqAP+/UKcpPFRHJTEm7OWlm7cysxMxKysvLq/S3a9dW7bqISC5LWnCHEIpCCAUhhIK6detW6W+PO65q10VEcllaTAfs3Bny8na9lpfn10VEZFdpEdyFhX5QTb16YOaPRUW6MSkisjv7nFViZn8DzgTqmNkHwIMhhP7JLqSwUEEtIpKIfQZ3COHqVBQiIiKJSYuhEhERSZyCW0Qkwyi4RUQyjIJbRCTDKLhFRDKMhRCS/6Zm5UDZfv55HeCTJJaTCfSds1+ufV/Qd66qeiGEhJadRxLcB8LMSkIIBXHXkUr6ztkv174v6DtHSUMlIiIZRsEtIpJh0jG4i+IuIAb6ztkv174v6DtHJu3GuEVEZO/SseMWEZG9SJvgTtWhxOnEzI41s6lmtsLMlpnZHXHXFCUzq2Fm88xsUeX3fSjumlLFzKqb2QIzGx13LalgZmvMbImZLTSzkrjriZqZ1TKzIWa2svK/5+aRfl66DJWYWUvgK+CFEMKJcdeTCmZ2FHBUCGG+mdUESoHLQgjLYy4tEmZmwCEhhK/M7CBgJnBHCOGNmEuLnJn9ASgADgshXBR3PVEzszVAQQghJ+Zxm9lAYEYIoZ+ZHQzkhRA+j+rz0qbjTsWhxOkmhPBRCGF+5c9fAiuAH8VbVXSC+6ry6UGV/9Kjc4iQmR0DXAj0i7sWST4zOwxoCfQHCCF8HWVoQxoFd64zs3zgZGBuvJVEq3LIYCGwAZgYQsjq71upO3AvsDPuQlIoABPMrNTM2sVdTMSOB8qB5yqHw/qZ2SFRfqCCOw2Y2aHAUODOEMLGuOuJUghhRwihCXAM0MzMsnpYzMwuAjaEEErjriXFWoQQmgLnAx0qh0Kz1feApkDvEMLJwCagY5QfqOCOWeVY71CgOIQwLO56UqXyfyVfB86LuZSotQAuqRzzHQy0NrOX4i0peiGEdZWPG4DhQLN4K4rUB8AH//J/j0PwII+MgjtGlTfr+gMrQgjd4q4namZW18xqVf7878DZwMp4q4pWCOG+EMIxIYR84FfAlBDCNTGXFSkzO6TyZjuVQwZtgaydLRZCWA+8b2Y/rbzUBoh0gsE+z5xMlVQdSpxmWgDXAksqx30B/hRCGBtjTVE6ChhoZtXxpuGVEEJOTI/LMUcCw70v4XvAoBDC3+MtKXK3A8WVM0pWA7+N8sPSZjqgiIgkRkMlIiIZRsEtIpJhFNwiIhlGwS0ikmEU3CIiGUbBLSKSYRTcIiIZRsEtIpJh/g/fd/K2NzDSJgAAAABJRU5ErkJggg==\n", "text/plain": [ - "" + "
    " ] }, - "metadata": {}, + "metadata": { + "needs_background": "light" + }, "output_type": "display_data" } ], @@ -197,25 +188,27 @@ "name": "stderr", "output_type": "stream", "text": [ - "Iteration: 100/100 | Cost 0.08 | Elapsed: 0:00:00 | ETA: 0:00:00" + "Iteration: 100/100 | Cost 0.08 | Elapsed: 0:00:00 | ETA: 0:00:000" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Intercept: 0.82\n", - "Slope: 0.22\n" + "Intercept: 0.22\n", + "Slope: 0.82\n" ] }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfcAAAFkCAYAAAA9h3LKAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAPYQAAD2EBqD+naQAAIABJREFUeJzt3Xd4V+X9//HnDVJ3EUcdVdsqgR+4iVqssxoFUdtSRw2j\nRYsCggzFIu7RqrV111FbNzXVuq0Mi1b9ogWVqICgCdhK3QqIigMI9++POw4oKp+sk8/J83Fdua7k\n8Blvj0leeZ9zjxBjRJIk5UerrAuQJEkNy3CXJClnDHdJknLGcJckKWcMd0mScsZwlyQpZwx3SZJy\nxnCXJClnDHdJknLGcJckKWcKDvcQwmYhhFtCCO+EED4MITwXQujSGMVJkqTCrVbIg0MI6wGPAw8B\n3YB3gBJgQcOXJkmS6iIUsnFMCOECYLcY496NV5IkSaqPQi/LHwI8HUK4PYTwZgihMoTQvzEKkyRJ\ndVNo5/4REIGLgDuAXYHLgAExxltW8vgNSJfv/wN83AD1SpLUUqwBfBeYEGOcV8gTCw33T4AnY4x7\nfuHYZcDOMcbdV/L4XsBfCilIkiQtp3eM8dZCnlDQgDrgdWDWCsdmAT/9ksf/B2DMmDF06tSpwLfK\nnxEjRnDJJZdkXUbmPA+f81wknofPeS4SzwPMmjWLPn36QG2WFqLQcH8c6LjCsY7Ay1/y+I8BOnXq\nRJcuzpZr27at5wHPwxd5LhLPw+c8F4nnYTkF39YudEDdJUDXEMLoEMLWtZfd+wN/KPSNJUlS4ygo\n3GOMTwM9gXJgOnAqMCzG+NdGqE2SJNVBoZfliTGOBcY2Qi2SJKkBuLZ8EyovL8+6hGbB8/A5z0Xi\nefic5yLxPNRPQVPhCn7xtOb81KlTpzowQpKkAlRWVlJaWgpQGmOsLOS5du6SJOWM4S5JUs4Y7pIk\n5YzhLklSzhjukiTljOEuSVLOGO6SJOWM4S5JUs4Y7pIk5YzhLklSzhjukiTljOEuSVLOGO6SJOWM\n4S5JUs4Y7pIk5YzhLklSzhjukiTljOEuSVLOGO6SJOWM4S5JUs4Y7pIk5YzhLklSzhjukiTljOEu\nSVLOGO6SJOWM4S5JUs4Y7pIk5YzhLklSzhjukiTljOEuSVLOGO6SJOWM4S5JUs4Y7pIk5YzhLklS\nzhjukiTljOEuSWpeli6FBQuyrqKoGe6SpObj2Weha1fo1y/rSoqa4S5Jyt6HH8KoUbDzzrB4MZxy\nStYVFbXVsi5AktTCPfQQDBgAr7wC55wDJ50EbdpkXVVRK6hzDyGcGUJYtsLHzMYqTpKUY/Pnw9FH\nQ1kZbL45TJuWOnaDvd7q0rnPAPYDQu3XSxuuHElS7sUIt90Gw4bBJ5/AtdfCL38JrVK/WVVVxZw5\nc2jfvj0lJSUZF1uc6nLPfWmM8e0Y41u1H/MbvCpJUj7NnQuHHALl5bDnnjBrFhxzDLRqxfz58+ne\n/SA6duxIjx496NChA927H8QCR84XrC7hXhJCeDWEMCeEMCaEsEWDVyVJypeaGrjiCthmG3jmGbjn\nHrjjDth0088e0qtXXyZOnAyMAeYCY5g4cTLl5X2yqrpoFRruk4F+QDdgIPA94LEQwtoNXJckKS9m\nzIDdd4ehQ6FvX5g5E3784+UeUlVVxYQJY6mpuRzoDWwB9Kam5jImTBhLdXV1FpUXrYLuuccYJ3zh\nyxkhhCeBl4EjgBu+7HkjRoygbdu2yx0rLy+nvLy8kLeXJBWTjz+G3/wGLrgA2reH//s/2GOPlT50\nzpw5tZ/ttcK/7A3A7Nmzc33/vaKigoqKiuWOLVy4sM6vV6+pcDHGhSGEKqD9Vz3ukksuoUuXLvV5\nK0lSMXnsMTj2WHjpJTj1VBg9GlZf/UsfvvXWW3/6RFLn/qlHAWjf/itjpuitrOGtrKyktLS0Tq9X\nr0VsQgjrkIL99fq8jiQpJ959N81Z33tv2GCDtOLcWWd9ZbADdOjQgW7detC69VDSPff/AmNo3XoY\n3br1yHXX3hgKnef+uxDCXiGE74QQfgDcDSwBKr7mqZKkvLvrLujcGSoq4Mor02X4zp1X+ekVFWMo\nK+sK9AW2BPpSVtaViooxjVVxbhV6WX5z4FZgA+BtYBLQNcY4r6ELkyQViVdfhSFD0gj4H/0oBfvm\nmxf8Mu3atWP8+Aeorq5m9uzZznOvh0IH1DkCTpKULFuWFqAZNQrWWgv+9jc49FAI4euf+xVKSkoM\n9Xpy4xhJUuFmzUr31QcNgiOOSNPbDjus3sGuhmG4S5JW3eLFaXOXHXeEN9+Ef/4T/vQnaNcu68r0\nBe4KJ0laNf/6V1oq9sUX4Ve/gtNOgzXXzLoqrYSduyTpq733Xhowt/vu6d761KlpcRqDvdmyc5ck\nfbn774fjjoMFC+Dii+H446F166yr0tewc5ck/a833kgD5X70I9huO3j+eRg+3GAvEnbukqTPxQjX\nXw8jR0KbNnDrrXDkkY6CLzJ27pKkpLoa9t0X+vdPu7bNmpX2XTfYi47hLkkt3ZIlcP756fL7yy/D\ngw/CjTemteFVlLwsL0kt2VNPpU59xgw48cS0yctaa2VdlerJzl2SWqIPPoARI6Br1zRI7qmn4MIL\nDfacsHOXpJZm/HgYOBDeegsuuCCF/GrGQZ7YuUtSS/H229CnDxx4IJSUwPTpcNJJBnsO+X9UkvIu\nRrjlFjjhhPT5jTfCz3/uKPgcs3OXpDx76SXo1g1+8Qs44IA0ve0XvzDYc85wl6Q8WroUfv972Hbb\ntNHL2LFpQZpvfSvrytQEDHdJyptnnoHvfz/t3DZgQFo69sADs65KTchwl6S8+PBDGDUKdtklLUwz\neTJccgmss07WlamJOaBOkvLgoYfg2GPh1VfhnHPSKPg2bbKuShmxc5ekYjZvHhx1FJSVwRZbwLRp\ncMopBnsLZ+cuScUoRrjtNhg6FBYvhj/9CY4+GlrZs8nOXZKKz9y5cPDBace2vfdO09v69zfY9Rm/\nEySpWNTUw
OWXQ+fO8NxzcM898Le/waabZl2ZmhnDXZKKwfTpsPvuMHx4WoRm5sy057q0Eoa7JDVn\nH38Mp50GXbrAe+/BpElw5ZXwzW9mXZmaMQfUSVJz9eijaXrbv/+dAv7kk2H11bOuSkXAzl2Smpt3\n302hvs8+sOGG8OyzcOaZBrtWmZ27JDUXMcJdd8GQIbBoEVx1VVo+1lHwKpDfMZLUHLz6KvTsCYcd\nltaFnzkTBg0y2FUnftdIUpaWLYOrr07T26ZMgTvugLvvhs03z7oyFTHDXZKyMmsW7LUXHHcc/Oxn\nqVs/9FD3Wle9Ge6S1NQ++QTOPht23BHeegseeQSuvRbatcu6MuWEA+okqSk98QQccwxUVaXtWU87\nDdZYI+uqlDN27pLUFN57DwYPhj32SPurV1bCr39tsKtR2LlLUmO77750X/3dd+HSS1PIt26ddVXK\nMTt3SWosb7wBhx+e1oDfYQd4/vm0RavBrkZm5y5JDS1GuO46OOkkaNMGbr0VjjzSUfBqMnbuktSQ\nqqth333ToLmf/CRNdysvN9jVpAx3SWoIS5bA+efDdtvB3Lnwj3/ADTfABhtkXZlaIC/LS1J9Pflk\n6tSffx5OOAHOOgvWWivrqtSC1atzDyGcHEJYFkK4uKEKkqSi8cEHMGIE7LZbGiT35JNw4YUGuzJX\n5849hLALcCzwXMOVI0lFYty4tLHLW2/Bb38Lw4fDal4MVfNQp849hLAOMAboD7zboBVJ0teoqqpi\n3LhxVFdXN/2bv/UW9O4NPXpASQnMmAEjRxrsalbqeln+SuD+GOPDDVmMJH2V+fPn0737QXTs2JEe\nPXrQoUMHunc/iAULFjT+m8cIN90EnTrB+PHp8wcfhK22avz3lgpUcLiHEI4EdgRGN3w5kvTlevXq\ny8SJk0kXDucCY5g4cTLl5X0a941fegkOOAD69YMDD4QXXoCf/9zpbWq2CrqOFELYHLgUKIsxLmmc\nkiTpf1VVVTFhwlhSsPeuPdqbmprIhAl9qa6upqSkpGHfdOnStFzsGWfAt74FY8emcJeauUJvEpUC\nGwGVIXz2J2trYK8QwhBg9RhjXPFJI0aMoG3btssdKy8vp7y8vA4lS2qJ5syZU/vZXiv8y94AzJ49\nu2HDvbIyTW979tm0ZOy556YNX6RGUFFRQUVFxXLHFi5cWOfXCyvJ4i9/cAhrA99Z4fCNwCzgghjj\nrBUe3wWYOnXqVLp06VLnIiWpqqqKjh07snznTu3XfamqqmqYcP/wwzRP/eKLoXNn+POfYddd6/+6\nUoEqKyspLS0FKI0xVhby3II69xjjImDmF4+FEBYB81YMdklqSB06dKBbtx5MnDiUmppI6tgfpXXr\nYZSV9WiYYJ84EQYMgFdfTZ36yJFpbXipyDTE8rOr3vpLUj1UVIyhrKwr0BfYEuhLWVlXKirG1O+F\n581Lg+X23x+23BKmTYPRow12Fa16T8yMMe7bEIVI0tdp164d48c/QHV1NbNnz6Z9+/b169hjhIqK\ntADNkiXpEvzRRzsKXkXPVRckFZ2SkpL6X4Z/+eW0wty4cWnP9csvh002aZgCpYy5K5yklqWmBi67\nDLbZBqZPh/vug9tvN9iVK4a7pJZj2jT4wQ/SZi/9+qVd3A45JOuqpAZnuEvKv48/hlNPhdLStJPb\npEnwhz/AN7+ZdWVSo/Ceu6R8e/TRtBjNyy/D6afDqFGw+upZVyU1Kjt3Sfm0YEEK9X32SUvHPvts\nWkbWYFcLYOcuKV9ihDvvhOOPh0WL4Oqr4dhjoZW9jFoOv9sl5cerr0LPnmlq2/e/D7NmwcCBBrta\nHL/jJRW/ZctSh96pE0yZAnfcAXffDd/+dtaVSZkw3CUVt5kzYa+94LjjoLw8deuHHuoqc2rRDHdJ\nxemTT9LubTvuCG+/nUbF//GPsN56WVcmZc4BdZKKz+OPp5Hw1dVw8slpDvsaa2RdldRs2LlLKh7v\nvZcuv++xB6y7LlRWpq1ZDXZpOXbukorDvffC4MHw7rtpbfjBg6F166yrkpolO3dJzdvrr6epbT/5\nCeywQxpAN3SowS59BTt3Sc1TjHDddTByJHzjG2nf9Z/9zFHw0iqwc5fU/FRVwQ9/mAbN9eyZprcd\neaTBLq0iw11S87FkCZx3Hmy/PbzyCkycCDfcABtskHVlUlHxsryk5uHJJ6F//3RPfeTItMnLWmtl\nXZVUlOzcJWXrgw9g+HDo2jXdW3/qKbjgAoNdqgc7d0nZGTsWBg1KK8z97ncwbBis5q8lqb7s3CU1\nvbfegl694KCDoGNHmDEDTjzRYJcaiD9JkppOjHDzzXDCCWnk+803Q58+joKXGpidu6SmMWcOHHAA\n9OsHBx6Yprf17WuwS43AcJfUuJYuTffTt9subfQybhyMGQMbbZR1ZVJuGe6SGk9lJey6a9q5beDA\ndG+9e/esq5Jyz3CX1PA+/BBOOikF+7JlMHkyXHwxrLNO1pVJLYID6iQ1rIkTYcAAeO01+PWv0yj4\nNm2yrkpqUezcJTWMefPSYLn994fvfAemTUuX4w12qcnZuUuqnxjTjm3Dh6e14a+7Do46ylHwUobs\n3CXV3csvp4VoevdOu7jNmgVHH22wSxkz3CUVrqYGLrsMttkGpk+H++6D226DTTbJujJJGO6SCjVt\nGvzgBzBiRLr8PnMmHHJI1lVJ+gLDXdKq+fhjOPVUKC2FRYvg8cfhiitg3XWzrkzSChxQJ+nrPfII\nHHtsusd+xhkwalTanlVSs2TnLunLLVgAxxyTBsttvDE89xycfrrBLjVzdu6S/leMcOedMGQIfPQR\nXHNNCvlW9gNSMfAnVdLyXnkFfvITOPzwNHBu5sy04pzBLhUNf1olJcuWwVVXQefO8NRTqXO/6y74\n9rezrkxSgQx3Sak733NPGDwYevVKX//0p1lXJamODHepJfvkEzjrLNhxx7Q2/GOPpfvr662XdWWS\n6qGgcA8hDAwhPBdCWFj78UQIwc2ZpWL0+OOw005w3nlpg5dnn03du6SiV2jn/l9gFNAFKAUeBu4N\nIXRq6MIkNZL33oPjjoM99oC2baGyEs45B9ZYY7mHVVVVMW7cOKqrqzMqVFJdFRTuMcYHYozjY4xz\nYoyzY4ynAR8AXRunPEkN6t5704C5W25Jq8tNmgTbbrvcQ+bPn0/37gfRsWNHevToQYcOHeje/SAW\nLFiQUdGSClXne+4hhFYhhCOBtYB/NVxJkhrc66/DYYelKW477ZQGzA0ZAq1b/89De/Xqy8SJk4Ex\nwFxgDBMnTqa8vE9TVy2pjgpexCaEsC0pzNcA3gd6xhhfaOjCJDWAZcvS/uonnQSrrw5//SscccSX\nbslaVVXFhAljScHeu/Zob2pqIhMm9KW6upqSkpKmql5SHdVlhboXgB2AtsBhwM0hhL2+KuBHjBhB\n27ZtlztWXl5OeXl5Hd5e0ip58cW0Hvxjj6U91n/3O1h//a98ypw5c2
o/22uFf9kbgNmzZxvuUiOo\nqKigoqJiuWMLFy6s8+uFGGO9Cgoh/AOYHWMctJJ/6wJMnTp1Kl26dKnX+0haRYsXpyA/91zYfHO4\n9lrYd99VempVVRUdO3Zk+c6d2q/7UlVVZbhLTaSyspLS0lKA0hhjZSHPbYh57q2A1RvgdSTV15Qp\nsPPOcOaZMHw4TJ++ysEO0KFDB7p160Hr1kNJgf5fYAytWw+jW7ceBrtUJAqd535eCGHPEMJ3Qgjb\nhhDOJ12vG9M45UlaJR98kMJ8t93Sjm1PPw0XXABrrlnwS1VUjKGsrCvQF9gS6EtZWVcqKvwxl4pF\noffcvwXcBGwKLASmAQfEGB9u6MIkraKxY2HQIHjnHfj972HoUFit7hs+tmvXjvHjH6C6uprZs2fT\nvn17O3apyBT0GyDG2L+xCpFUoLfeSt16RQUccEBaNvZ732uwly8pKTHUpSLlfu5SsYkRbroJTjwx\nTWm75Rbo3ftLp7dJanncOEYqJnPmwP77w1FHQY8eMGsW9OljsEtajuEuFYOlS+HCC2G77VLAjx+f\nOvaNNsq6MknNkOEuNXeVlbDrrjB6dBo4N2MGdOuWdVWSmjHDXWquPvwwLRu7yy5pGdkpU+Cii2Dt\ntbOuTFIz54A6qTn6xz9gwIC04ct558EJJ0CbNllXJalI2LlLzcm8efCLX6Spbd/9blphbtQog11S\nQezcpeYgxjRffdgwqKmB66+Hfv0cBS+pTuzcpay9/HKa1ta7N+y3X5redtRRBrukOjPcpazU1MCl\nl8I226QR8Pffn/Zb33jjrCuTVOQMdykLzz2XNnk54YTUpc+cCQcfnHVVknLCcJea0kcfwSmnpG1Z\nP/wQHn8crrgC1l0368ok5YgD6qSm8s9/wrHHwty5ab/1X/0qbc8qSQ3Mzl1qbAsWQP/+sO++sOmm\n6ZL8aacZ7JIajZ271FhihDvugOOPT5fjr7kGjjkGWvk3taTG5W8ZqTG88gr8+MdwxBGw++5petuA\nAQa7pCbhbxqpIS1bBldeCZ07w9NPw113wZ13wmabZV2ZpBbEcJcaysyZsOeeMGQI9OqVvu7ZM+uq\nJLVAhrtUX598AmedBTvumNaGf+yxdH99vfWyrkxSC+WAOqk+Jk1K09tmz4aTT05z2NdYI+uqJLVw\ndu5SXSxcCIMGpcvwbdtCZSWcc47BLqlZsHOXCnXPPTB4MLz3XlpdbtAgaN0666ok6TN27tKqeu01\nOPTQNEiuS5c0YG7IEINdUrNjuEtfZ9kyuPbaNL1t0iS47Ta47z7YYousK5OklTLcpa/y4ovwwx+m\nBWgOPTQtRnPEEe61LqlZM9yllVm8GH7zG9hhB3j1VXjoIbjuOlh//awrk6Sv5YA6aUVTpqSNXmbN\ngpNOgjPOgDXXzLoqSVpldu7Sp95/H4YNg912S1Pann4azj/fYJdUdOzcJYAHHkhT2ubNg4suSju5\nreaPh6TiZOeulu3NN6G8HA4+OI2GnzEDRoww2CUVNX+DqWWKEW68EU48MW3Desst0Lu3o+Al5YKd\nu1qeOXNg//3h6KPhoIPSwLk+fQx2SblhuKvlWLoULrwQtt02Bfz48alj32ijrCuTpAZluKtlmDoV\ndtkFRo+G445L99a7dcu6KklqFIa78m3RIhg5EnbdNX09ZUoaDb/22tnWJUmNyAF1yq8HH4SBA+H1\n1+G88+CEE6BNm6yrkqRGZ+eu/HnnHfj5z9Nl9+99D6ZPh1GjDHZJLYadu/IjRrj1Vhg+HGpq4Prr\noV8/R8FLanHs3JUP//kPHHhgmtK2335pettRRxnsklokw13FraYGLrkEttkGnn8e7r8f/vpX2Hjj\nrCuTpMwY7ipezz0HXbumVeZ++UuYOTMtIytJLVxB4R5CGB1CeDKE8F4I4c0Qwt0hhA6NVZy0Uh99\nlOarl5amz594Ai6/HNZdN+vKJKlZKLRz3xO4Avg+UAa0AR4MIbgnpuqtqqqKcePGUV1d/eUP+uc/\nYfvt4eKL4ayzoLIyde+SpM8UFO4xxh4xxltijLNijNOBfsCWQGljFKeWYf78+XTvfhAdO3akR48e\ndOjQge7dD2LBggWfP2jBgnTpfd99YdNN0yX5006Db3wju8IlqZmq7z339YAIzG+AWtRC9erVl4kT\nJwNjgLnAGCZOnEx5eZ80ve3226FTJ7jjDrjmGnjkEfh//y/boiWpGavzPPcQQgAuBSbFGGc2XElq\nSaqqqpgwYSwp2HvXHu1NTU3k+Ql9+aCsjHUefhh++lO44grYbLMMq5Wk4lCfRWyuAjoDu3/dA0eM\nGEHbtm2XO1ZeXk55eXk93l55MGfOnNrP9vrsWCtqGMS/OR9Y7dln4a67oGfPTOqTpKZQUVFBRUXF\ncscWLlxY59cLMcbCnxTCH4BDgD1jjHO/4nFdgKlTp06lS5cudS5S+VVVVUXHjh35tHPvzPP8mf7s\nxmSuBg6YOpWt/d6R1AJVVlZSWloKUBpjrCzkuQXfc68N9h8DP/yqYJdWRYcOHejWrQdrtjqes+jJ\nM+zEesxln1brcm+3Hga7JNVBofPcryLdGO0FLAohbFz7sUajVKcW4fZhQ3hhzcWM5h4uYAk78Rpr\n7L8nFRVjsi5NkopSoffcB5JGxz+ywvGjgJsboiC1IAsXwskn881rruGbXbvy8umns0sITG/fnpKS\nkqyrk6SiVVC4xxhdrlYN4+67YcgQeO+9NAp+0CC+07o138m6LknKAcNaTeu11+DQQ9PUti5d0nrw\nQ4ZA69ZZVyZJuWG4q2ksWwbXXgudO8OkSXDbbXDffbDFFllXJkm5Y7ir8b34IuyzDwwYkLr2WbPg\niCPca12SGonhrsazeDGce27a6OW11+Chh+C662D99bOuTJJyrT4r1ElfbvJk6N8fXngBTjoJzjgD\n1nTzQElqCnbualjvvw9Dh8IPfpDC/Omn4fzzDXZJakJ27mo4f/87HHcczJsHF12UQt5R8JLU5Ozc\nVX9vvglHHgmHHJJGwz//PIwYYbBLUkbs3FV3McKNN8KJJ6YgHzMGevVyFLwkZczOXXUzezaUlcHR\nR8PBB6fpbb17G+yS1AwY7irMkiXw29/CdtvBSy/BhAlw882w4YZZVyZJqmW4a9VNnQq77gqnnAKD\nB8OMGXDAAVlXJUlageGur7doUbqvvuuu6espU+D3v4e11862LknSSjmgTl9twgQYOBDeeAPOOw9O\nOAHatMm6KknSV7Bz18q98w707Qvdu8NWW8H06TBqlMEuSUXAzl3LixH+8hcYPjzt5Hb99dCvn6Pg\nJamI2Lnrc//+Nxx4YOrY998/TW876iiDXZKKjOEuWLoULr4Ytt0WZs5My8hWVMDGG2ddmSSpDgz3\nlu6552C33WDkyLSL2/PPw0EHZV2VJKkeDPeW6qOPYPRoKC2Fjz+GJ56Ayy6DddfNujJJUj05oK4l\nevhhGDAA5s6Fs86CX/0Kv
vGNrKuSJDUQO/eWZP78tBb8fvvBZpvBtGlw2mkGuyTljJ17SxAj3H57\n2l/9k0/gj39M99db+bedJOWRv93zbu7ctM/6kUfCHnuk0fDHHmuwS1KO+Rs+r2pq4IorYJttoLIS\n7roL7rwzXY6XJOWa4Z5HM2akLn3oUOjTJy1G07Nn1lVJkpqI4Z4nH38MZ5wBXbrAu+/CY4/B1VdD\n27ZZVyZJakIOqMuL//s/OOYYeOmlNH/9lFNg9dWzrkqSlAE792K3cGHaknWvvWD99eGZZ+Dssw12\nSWrB7NyL2d13w+DB8P778Ic/wKBBjoKXJNm5F6XXXoOf/jR97Lxzmt42eLDBLkkCDPfismwZXHMN\ndOqU1oK//Xa4917YYousK5MkNSOGe7F44QXYe+906f3ww9P0tsMPd691SdL/MNybu8WL4dxzYYcd\n4I030qYvf/4ztGuXdWWSpGbKAXXN2b/+laa3vfBC2rnt9NNhzTWzrkqS1MzZuTdH778Pxx8Pu++e\nwnzqVDjvPINdkrRK7Nybm7//Pd1Xnz8fLrooLSHbunXWVUmSioide3Px5pvws5+lHdy22Qaefx5G\njDDYJUkFs3PPWoxwww0wcmQK8jFjoFcvR8FLkurMzj1L1dWw337wy1+mjn3WLOjd22CXJNVLweEe\nQtgzhHBfCOHVEMKyEMKPGqOwXFuyBC64ALbfHv7zH5gwAW66CTbcMOvKJEk5UJfOfW3gWeA4IDZs\nOS3AU0/BLrvAqaemJWOnT4cDDsi6KklSjhR8zz3GOB4YDxCC149XVfWzz7La2Wfz3fvuI2y/PTz5\nJJSWZl2WJCmHvOfeyObPn0+Pbj34ZKed2OSeexi1bBkHbbQJC7baKuvSJEk55Wj5RtarV18mPjSF\nNRjGNA5lDnNp/fBQysv7MH78A1mXJ0nKIcO9EVVVVTFhwlhgDHfT+7PjNTWRCRP6Ul1dTUlJSXYF\nSpJyqUnCfcSIEbRt23a5Y+Xl5ZSXlzfF22dmzpw5tZ/ttcK/7A3A7NmzDXdJEhUVFVRUVCx3bOHC\nhXV+vSYJ90suuYQuXbo0xVs1K1tvvXXtZ4/BFzp3eBSA9u3bN3VJkqRmaGUNb2VlJaV1HHhdcLiH\nENYG2gNfmuAEAAAHO0lEQVSfjpTfKoSwAzA/xvjfOlWRUx06dKBbtx5MnDiUmppI6tgfpXXrYZSV\n9bBrlyQ1irqMlt8ZeAaYSprnfhFQCZzdgHXlRkXFGMrKugJ9gS2BvpSVdaWiYkzGlUmS8qou89wf\nxSl0q6xdu3aMH/8A1dXVzJ49m/bt29uxS5IalaPlm0hJSYmhLklqEnbgkiTljOEuSVLOGO6SJOWM\n4S5JUs4Y7pIk5YzhLklSzhjukiTljOEuSVLOGO6SJOWM4S5JUs4Y7pIk5YzhLklSzhjukiTljOEu\nSVLOGO6SJOWM4S5JUs4Y7pIk5YzhLklSzhjukiTljOEuSVLOGO6SJOWM4S5JUs4Y7pIk5YzhLklS\nzhjukiTljOEuSVLOGO6SJOWM4S5JUs4Y7pIk5YzhLklSzhjukiTljOEuSVLOGO6SJOWM4S5JUs4Y\n7pIk5YzhLklSzhjukiTljOEuSVLOGO6SJOWM4d6EKioqsi6hWfA8fM5zkXgePue5SDwP9VOncA8h\nDA4h/DuE8FEIYXIIYZeGLiyP/GZNPA+f81wknofPeS4Sz0P9FBzuIYSfARcBZwI7Ac8BE0IIGzZw\nbZIkqQ7q0rmPAP4YY7w5xvgCMBD4EDi6QSuTJEl1UlC4hxDaAKXAQ58eizFGYCKwW8OWJkmS6mK1\nAh+/IdAaeHOF428CHVfy+DUAZs2aVXhlObRw4UIqKyuzLiNznofPeS4Sz8PnPBeJ52G57Fyj0OeG\n1Hiv4oND2BR4FdgtxjjlC8d/C+wVY9xthcf3Av5SaFGSJOkzvWOMtxbyhEI793eAGmDjFY5vDLyx\nksdPAHoD/wE+LvC9JElqydYAvkvK0oIU1LkDhBAmA1NijMNqvw7AXODyGOPvCi1AkiQ1rEI7d4CL\ngRtDCFOBJ0mj59cCbmzAuiRJUh0VHO4xxttr57SfQ7oc/yzQLcb4dkMXJ0mSClfwZXlJktS8uba8\nJEk5Y7hLkpQzjRrubjADIYQ9Qwj3hRBeDSEsCyH8KOuashBCGB1CeDKE8F4I4c0Qwt0hhA5Z19XU\nQggDQwjPhRAW1n48EULonnVdWQshnFz783Fx1rU0tRDCmbX/7V/8mJl1XVkJIWwWQrglhPBOCOHD\n2p+XLlnX1ZRqc3PF74llIYQrVvU1Gi3c3WDmM2uTBh0eB7TkAQ57AlcA3wfKgDbAgyGENTOtqun9\nFxgFdCEt5fwwcG8IoVOmVWWo9o/+Y0m/I1qqGaQBypvUfuyRbTnZCCGsBzwOfAJ0AzoBJwILsqwr\nAzvz+ffCJsD+pPy4fVVfoNEG1H3JfPj/kubDX9gob9rMhRCWAT+JMd6XdS1Zq/0j7y3SyoaTsq4n\nSyGEecDIGOMNWdfS1EII6wBTgUHA6cAzMcYTsq2qaYUQzgR+HGNsUd3pyoQQLiCtgLp31rU0JyGE\nS4EeMcZVvtrZKJ27G8xoFaxH+kt0ftaFZCWE0CqEcCRpnYh/ZV1PRq4E7o8xPpx1IRkrqb11NyeE\nMCaEsEXWBWXkEODpEMLttbfvKkMI/bMuKku1edobuK6Q5zXWZfmv2mBmk0Z6TxWJ2qs4lwKTYowt\n7t5iCGHbEML7pEuPVwE9a7dPblFq/7DZERiddS0Zmwz0I12GHgh8D3gshLB2lkVlZCvSVZwXgQOA\nq4HLQwh9M60qWz2BtsBNhTypLivUSfV1FdAZ2D3rQjLyArAD6Qf2MODmEMJeLSngQwibk/7AK4sx\nLsm6nizFGL+4bviMEMKTwMvAEUBLu1XTCngyxnh67dfPhRC2Jf3Rc0t2ZWXqaGBcjHFl+7d8qcbq\n3AvdYEYtRAjhD0APYJ8Y4+tZ15OFGOPSGONLMcZnYoynkgaSDcu6riZWCmwEVIYQloQQlgB7A8NC\nCItrr+60SDHGhUAV0D7rWjLwOrDiHuGzgC0zqCVzIYQtSQOQ/1Tocxsl3Gv/Ep8K7Pfpsdof1v2A\nJxrjPdX81Qb7j4EfxhjnZl1PM9IKWD3rIprYRGA70mX5HWo/ngbGADvEFrx0Zu0gw/akoGtpHgc6\nrnCsI+lKRkt0NOl29thCn9iYl+XdYAaovW/WHvi0E9kqhLADMD/G+N/sKmtaIYSrgHLgR8CiEMKn\nV3UWxhhbzHbAIYTzgHGknRTXJQ2U2Zt0f7HFiDEuApYbbxFCWATMizGu2LnlWgjhd8D9pAD7NnA2\nsASoyLKujFwCPB5CGE2a9vV9oD9wTKZVZaC2Ie4H3BhjXFbo8xst3N1g5jM7A/8kjQyPpLn/kAZH\nHJ1VURkYSPrvf2SF40cBNzd5Ndn5Fun//abAQmAacICjxYGWuw7E5sCtwAbA28AkoG
uMcV6mVWUg\nxvh0CKEncAFpauS/gWExxr9mW1kmyoAtqOO4CzeOkSQpZ1xbXpKknDHcJUnKGcNdkqScMdwlScoZ\nw12SpJwx3CVJyhnDXZKknDHcJUnKGcNdkqScMdwlScoZw12SpJz5/4VLaJPfiTa/AAAAAElFTkSu\nQmCC\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAW4AAAD8CAYAAABXe05zAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4xLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvDW2N/gAAGzJJREFUeJzt3WmUVOW1xvH/Fo0R4pSAs9BJzEArItrggBcFicNVo7n3uoy214mIMWhi1GskRI1JICpRUQaRNA6YVoMIKjSKDAIyNNBMQjOIKI0ihiaiAo1M/d4Pu53Rrh5OnTpVz28tVnUXRdWuDz5r+579vsdCCIiISHLsFncBIiJSNwpuEZGEUXCLiCSMgltEJGEU3CIiCaPgFhFJGAW3iEjCKLhFRBJGwS0ikjC7R/GmzZs3D3l5eVG8tYhIVpo7d+76EEKLVF4bSXDn5eVRVlYWxVuLiGQlM6tI9bVaKhERSRgFt4hIwii4RUQSRsEtIpIwCm4RkYRRcIuIJIyCW0QkYRTcIiINtXEj3HQTrF2blo9TcIuINERJCeTnw733wksvpeUjFdwiIvVRWQkXXwznnAP77gszZsBll6XloxXcIiJ1EQI8/ji0bg0jRsAdd8C8eXDCCWkrIaWzSsxsFbAR2AnsCCEURFmUiEhGqqiAq6+GcePgxBOhqMiXSdKsLh135xDCMQptEck5O3fC/ffDkUfC9OnQvz9Mm/ZJaBcXQ14e7LabPxYXR1tOJKcDiohkjfJy6NYNZs2Cs86CwYOhZctP/rq4GLp3h6oq/72iwn8HKCyMpqRUO+4AvGRmc82sezSliIhkkK1b4fbboV07WLnSE7qk5HOhDdCr16eh/bGqKn8+Kql23B1DCO+Y2QHAeDNbFkKY+tkX1AR6d4CWX/hiIiKJMmMG/OIXsHQpXHIJ3HcfNG++y5euXr3rt/iq5xtDSh13COGdmsd1wCigwy5eMySEUBBCKGjRIqWbOIiIZJaNG+G66+Dkk2HzZhg71idIviK04UsNeK3PN4Zag9vMmpnZ3h//DJwOLI6uJBGRGIwd6xcfBw708F682Ne0a9G7NzRt+vnnmjb156OSSsd9IDDNzBYCs4GSEMKL0ZUkIpJGlZV+FfHss2HvvX1q5P77/ecUFBbCkCHQqhWY+eOQIdFdmASwEEKjv2lBQUHQPSdFJKOF4Bccr78ePvzQrybecgvsuWcs5ZjZ3FTHrTUOKCK5p6ICrrkGXnjBdzwWFfkySUJoy7uI5I6dO+GBBzykp071JZFp0xIV2qCOW0RyRXm5j/iVlsKZZ/pGmlat4q6qXtRxi0h227oV/vhH30izYoWP940dm9jQBnXcIpLNZs70LnvJEj+CtV8/yIJ9Juq4RST7bNoEv/41dOzom2rGjPEJkiwIbVDHLSLZ5sUX/ejVt96CHj2gT5+UZ7KTQh23iGSH9ev9XJGzzoJmzXxapH//rAttUHCLSNKFAE884XekGT4cbrsN5s+Hk06Ku7LIaKlERJJr9WrfSDN2LBx/vG+kOeqouKuKnDpuEUme6moYMMA3zkye7NMi06fnRGiDOm4RSZolS3zEb+ZMOP10eOghv19YDlHHLSLJsG0b/OlPvpFm+XIYNswnSHIstEEdt4gkQWmpd9nl5XDRRb40csABcVcVG3XcIpK5Nm3yY1dPOgk++ABGj/YJkhwObVDHLSKZatw430hTUfHpRpp99om7qoygjltEMsv69XDppX6C3157+UaaAQMU2p+h4BaRzBACPPkk5Of74623+kaajh3jrizjaKlEROL31lvwq1/5YVAdOsDEidCmTdxVZSx13CISn+pqGDTIN9JMmgT33QczZii0a6GOW0TisWyZj/hNnw4/+YlvpPnud+OuKhHUcYtIem3bBn/5C7RtC0uXwmOP+QSJQjtl6rhFJH1mz/Yue9EiuPBCv1nvgQfGXVXiqOMWkeht3gw33AAnngjvvQfPPw9PPaXQrid13CISrZde8o00q1b5Eax33qmZ7AZSxy0i0fj3v+Hyy+GMM2DPPeGVV3yCRKHdYApuEWlcIcA//+kbaYqLoVcvWLAATj457sqyhpZKRKTxvP22b6QZPRrat4fx4+Hoo+OuKuuo4xaRhquuhgcf9C57wgS45x6/0YFCOxLquEWkYZYtg6uu8sOgunb1jTTf+17cVWU1ddwiUj/bt0Pv3r6RprwcHnnEJ0gU2pFTxy0idTdnDnTr5htpLrgAHngADjoo7qpyhjpuEUnd5s1w441wwgk+7vfsszB8uEI7zdRxi0hqJkyA7t3hzTd9Q81dd8G++8ZdVU5Sxy0iX++99+CKK/wEvz32gClTYPBghXaMFNwismsh+DJI69bw+OPQsycsXAidOsVdWc7TUomIfNmaNb6R5vnn4bjjfFqkbdu4q5Ia6rhF5FPV1b4Mkp/vux779oXSUoV2hlHHLSJu+XK/+Dh1KnTpAkOGwPe/H3dVsgspd9xm1sTM5pvZmCgLEpE0274d+vTxrvrVV2HoUJ8gUWhnrLoslfwGWBpVISLih+nl5cFuu/ljcXHEH1hW5odB9eoF557rtxK78kowi/iDpSFSCm4zOww4GyiKthyR3FVc7CsVFRU+0FFR4b9HEt5VVXDTTXD88bBuHYwaBU8/rY00CZFqx90PuBmojrAWkZzWq5fn6WdVVfnzjWriRGjTxk/w+8UvYMkSOP/8Rv4QiVKtwW1m5wDrQghza3lddzMrM7OyysrKRitQJFesXl235+tswwZfBunaFZo0gcmT/SS//fZrpA+QdEml4+4I/NTMVgFPAV3M7B9ffFEIYUgIoSCEUNCiRYtGLlMk+7VsWbfnUxYCjBjhG2mGDYNbbvGNNKec0sA3lrjUGtwhhJ4hhMNCCHnAz4FJIYRLIq9MJMf07g1Nm37+uaZN/fl6W7MGfvYzP8Hv0EP9VL+//hX22qtBtUq8tAFHJEMUFvrodKtWPtTRqpX/XlhYjzerrvZ/nJ8P48bB3XfDrFnQrl2j1y3pZyGERn/TgoKCUFZW1ujvKyIpeO01H0eZMgU6d/YAP+KIuKuSWpjZ3BBCQSqvVcctki22b4c77/T7PC5YAEVFPkGi0M462vIukg3mzvXRvgUL4L//G/r3h4MPjrsqiYg6bpEkq6qCm2+GDh3gX/+CkSN9gkShndXUcYsk1aRJvpa9cqXfZf3uuzWTnSPUcYskzYYNvixy2mk+fjJpkl+AVGjnDAW3SJI884yP+D36KPzud36aX+fOcVclaaalEpEkeOcduPZaPwyqXTsoKYFjj427KomJOm6RTFZdDX//u3fZL7zgd1afPVuhnePUcYtkqhUr/O
Lj5Mlw6qm+jv2DH8RdlWQAddwimWbHDu+sjz4a5s/3jnvSJIW2fEIdt0gmmTfPJ0bmz/fDoQYMgEMOibsqyTDquEUywZYtPiXSoQOsXeubaEaOVGjLLqnjFonb5Mm+geb1173bvvtu2H//uKuSDKaOWyQu77/vgd25s0+PTJzo69kKbamFglskDqNG+Yjfww/D//0fLFoEXbrEXZUkhJZKRNJp7VrfSDNyJLRtC6NHw3HHxV2VJIw6bpF0CAGGDvUuu6TEbx82Z45CW+pFHbdI1F5/3TfSvPwydOrk69g//GHcVUmCqeMWicqOHdC3L7Rp4zc6eOghD2+FtjSQOm6RKCxYAN26+Yaa886DgQP9LusijUAdt0hj2rIFevaEggJYswaeftonSBTa0ojUcYs0lilTfC57xQq48kpfJvn2t+OuSrKQOm6RhvrgA7j6aj/Bb+dOmDDBJ0gU2hIRBbdIQzz3nI/4FRXBTTf5RprTTou7KslyCm6R+nj3XbjgAjj/fGjeHGbN8qWRpk3jrkxygIJbpC5CgEce8S579Gjo0wfKyvxipEia6OKkSKreeMM30kycCP/xH76R5kc/irsqyUHquEVqs2MH3HMPHHWUb1MfPNiPYlVoS0zUcYt8nYUL/YzssjL46U9h0CDNZEvs1HGL7MpHH0GvXr52vXo1DB8Ozz6r0JaMoI5b5IteecU30ixfDpdf7sskmsmWDKKOW+RjH34I11zjJ/ht2wbjx/sEiUJbMoyCWwR8tC8/H4YMgRtu8I00XbvGXZXILim4Jbf961/w85/7hcfvfAdKS31ppFmzuCsT+UoKbslNIcBjj0Hr1n5631/+4pMj7dvHXZlIrXRxUnLPm2/6oVDjx8PJJ/tGmh//OO6qRFKmjltyx86dcN99vpGmtNRnsqdMUWhL4qjjltzw6qu+kWbOHDj3XA/tww6LuyqRelHHLdnto4/g1lv9buqrVsFTT/lRrAptSbBag9vMvmlms81soZmVm9kd6ShMpMGmTYN27fzCY2EhLF0KF14IZp+8pLgY8vJgt938sbg4tmpFUpZKx70V6BJCaAscA5xpZidEW5ZIA3z4IfTo4Sf4ffQRjBsHjz7q436fUVzsh/1VVPiQSUWF/67wlkxXa3AHt6nm1z1q/oRIqxKprzFj4Mgj/QS/3/4WFi+G00/f5Ut79YKqqs8/V1Xlz4tkspTWuM2siZktANYB40MIs3bxmu5mVmZmZZWVlY1dp8jXW7cOLrrILzzutx/MnAn33vu1G2lWr67b8yKZIqXgDiHsDCEcAxwGdDCzo3bxmiEhhIIQQkGLFi0au06RXQsBhg3zjTQjR8Kf/wxz50KHDrX+05Yt6/a8SKao01RJCOF9YDJwZiTViNTFqlVw5plw2WUe3AsWwB/+AN/4Rkr/vHfvL98ismlTf14kk6UyVdLCzPar+XkvoCuwLOrCRL7Szp3Qr5+vZc+YAQMHwtSpHt51UFjoZ0q1auWDJq1a+e+FhRHVLdJIUtmAczDwmJk1wYN+eAhhTLRliXyFRYt8I83s2XD22fDgg3D44fV+u8JCBbUkT63BHUJ4FWiXhlpEvtrWrb6G8de/wv77w5NPfmkmWyRXaMu7ZL7p073LXrYMLr3Up0W+MJMtkku05V0y18aNcO21vpFmyxZ48UU/ilWhLTlOwS2ZqaTE70gzaBD8+te+keaMM+KuSiQjKLgls1RWwsUXwznnwL77+tRIv37wrW/FXZlIxlBwS2YIAR5/3Ef6RoyAO+6AefPgBB2LI/JFujgp8auo8DvSjBsHJ54IRUW+TCIiu6SOW+Kzcyfcf79vpJk+Hfr396NYFdoiX0sdt8SjvBy6dYNZs+Css/w0Px0SIpISddySXlu3wu23+w0OVq70w69LShTaInWgjlvSZ8YM30izdClcconfuLd587irEkkcddwSvY0b4brr4OSTYfNmGDvWJ0gU2iL1ouCWaI0d6xcfBw708F682Ne0RaTetFQi0aishOuvhyee8CmR6dN91E9EGkwdtzSuEOAf//CNNE8/DX/8o2+kUWiLNBp13NJ4KirgmmvghRd8x2NRkS+TiEijUsctDbdzJzzwgIf01Kn+87RpCm2RiKjjloYpL/cRv9JSv//j4MF+DzARiYw6bqmfrVt9/bpdO1ixwte1x45VaIukgTpuqbuZM73LXrLEb9h4333QokXcVYnkDHXckrpNm/ymBh07+qaakhLvtBXaImml4JbUvPiiX2wcMMBvJ1ZeDv/5n3FXJZKTFNzy9dav93NFzjoLmjXzaZEHHoC99467MpGcpeCWXQvBdz22bg3Dh8Ntt8H8+XDSSXFXJpLzdHFSvmz1at9IM3YsHH+8b6Q56qi4qxKRGuq45VPV1b6GfeSRMHmy36R3+nSFtkiGUcctbskSH/GbORNOPx0eegjy8uKuSkR2QR13rtu2Df70J99Is3w5DBvmEyQKbZGMpY47l5WWepddXg4XXeRLIwccEHdVIlILddy5aNMmPyv7pJPggw9g9GifIFFoiySCOu5cM24cXH21H8Haowf06QP77BN3VSJSB+q4c8X69XDppX6C3157+UaaAQMU2iIJpODOdiHAk0/67cOefBJuvdU30nTsGHdlIlJPWirJZm+95RtpSkqgQweYOBHatIm7KhFpIHXc2ai62u+qnp8PL7/sx67OmKHQFskS6rizzdKlPuI3Ywb85Ce+kea73427KhFpROq4s8W2bfDnP8Mxx8CyZfDYYz5BotAWyTrquLPBrFneZS9eDBdeCPffDwceGHdVIhIRddxJtnkz/Pa3cOKJsGEDPP88PPWUQlsky9Ua3GZ2uJm9bGZLzazczH6TjsKkFi+95Kf29esHv/ylHxJ17rlxVyUiaZBKx70DuDGE0Bo4AehhZvnRlpXbiov9jKfddvPH4uLP/OW//w2XXQZnnAF77gmvvAKDBmkjjUgOqXWNO4SwFlhb8/NGM1sKHAosibi2nFRcDN27Q1WV/15R4b8TAoW7/9Nv1rthA/TqBX/4A3zzm7HWKyLpV6eLk2aWB7QDZkVRjHgefxzaH9u/6m0O7H4NbBkD7dvDhAlw9NHxFCgisUv54qSZfQt4Brg+hPDhLv6+u5mVmVlZZWVlY9aYU1av/vRno5prGMQS8jlpy0S45x6/0YFCWySnpRTcZrYHHtrFIYSRu3pNCGFICKEghFDQokWLxqwxp7Rs6Y8/YhlTOIVB9GAWx3PGIYvhhhugSZN4CxSR2KUyVWLAUGBpCOHe6EvKbX3u2M7te/RmIW05knIu5xHO3+slfnn39+IuTUQyRCpr3B2B/wUWmdmCmud+H0IYG11ZOWrOHC6+pxtsX8SYphdwVdUD7NnqIIb0hsLCuIsTkUyRylTJNMDSUEvu2rwZbrvNZ7IPOgiefZZzzjvPR3lERL5AW97jNmGCz/u9+abfmeauu2DffeOuSkQymLa8x+W99+CKK/wEvz32gClTYPBghbaI1ErBnW4hwPDh0Lo1PP449OwJCxdCp05xVyYiCaGlknRaswZ+9Ss/D
Oq44/y8kbZt465KRBJGHXc6VFf7Mkh+PowfD337QmmpQltE6kUdd9SWL/eLj1OnQpcuMGQIfP/7cVclIgmmjjsq27dDnz7eVb/6Kgwd6hMkCm0RaSB13FEoK/M70ixcCP/zP9C/v89ni4g0AnXcjamqCm66CY4/Htatg1Gj4OmnFdoi0qjUcTeWiRN9LfuNN/zxrrtgv/3irkpEspA67obasAGuvBK6dvWT+yZPhoceUmiLSGQU3PUVAowY4Rtphg2DW27xNe1TTom7MhHJcloqqY81a6BHD3juOTj2WHjhBWjXLu6qRCRHqOOui+pqn8POz4dx4+Duu2HWLIW2iKSVOu5UvfaaX3ScMgU6d/YAP+KIuKsSkRykjrs227fDnXf6fR4XLIC//90nSBTaIhITddxfZ9486NbNA/u//gsGDICDD467KhHJceq4d6WqCm6+GTp0gHffhWee8T8KbRHJAOq4v2jSJF/LXrnSt6337auZbBHJKOq4P7Zhgwf1aaf575Mm+Xq2QltEMoyCG3wZJD8fHn3Ul0gWLfLJERGRDJTbSyXvvAPXXuuHQR1zDJSU+IYaEZEMlpsddwi+DJKf77se77wTZs9WaItIIuRex/3663DVVX4Y1Kmn+kaaH/wg7qpERFKWOx33jh1+1GqbNjB/vgf2xIkKbRFJnNzouOfP94008+fDz37mG2kOOSTuqkRE6iW7O+4tW+B3v4P27WHtWj+GdeRIhbaIJFr2dtyTJ/ta9uuve7fdty/sv3/cVYmINFj2ddzvv++B3bmzH8M6cSIUFSm0RSRrZFdwjxrlI34PP+w37V20CLp0ibsqEZFGlR1LJWvX+kaakSOhbVsYPRqOOy7uqkREIpHsjjsEGDrUu+ySEujTB+bMUWiLSFZLbse9cqWf4jdpEnTq5Dshf/jDuKsSEYlc8jruHTt8QqRNGygrg8GD4eWXFdoikjOS1XEvWOBHr86dC+edBwMHwqGHxl2ViEhaJaPj3rIFevaEggJ46y0YPtwnSBTaIpKDMr/jnjLF57JXrIArroC//Q2+/e24qxIRiU3mdtwffABXX+0n+O3YAePH+3y2QltEclxmBvdzz/mIX1ER3Hijb6Tp2jXuqkREMkKtwW1mD5vZOjNbHGUhxcXQ/vB3edougPPPZ8PuzaG01JdGmjWL8qNFRBIllY77UeDMKIsoLvaR7I5vP8W5jOb39Cavsozi19pH+bEiIolkIYTaX2SWB4wJIRyVypsWFBSEsrKylIvIy4OKCmjCDvJYxUqOAKBVK1i1KuW3ERFJLDObG0IoSOW1jbbGbWbdzazMzMoqKyvr9G9Xr/bHnez+SWh/9nkREflUowV3CGFICKEghFDQokWLOv3bli3r9ryISC7LiKmS3r2hadPPP9e0qT8vIiKflxHBXVjo9+5t1QrM/HHIEH9eREQ+r9adk2b2JHAq0NzM3gZuDyEMbexCCgsV1CIiqag1uEMIF6WjEBERSU1GLJWIiEjqFNwiIgmj4BYRSRgFt4hIwii4RUQSJqWzSur8pmaVQEU9/3lzYH0jlpME+s7ZL9e+L+g711WrEEJK284jCe6GMLOyVA9ayRb6ztkv174v6DtHSUslIiIJo+AWEUmYTAzuIXEXEAN95+yXa98X9J0jk3Fr3CIi8vUyseMWEZGvkTHBna6bEmcSMzvczF42s6VmVm5mv4m7piiZ2TfNbLaZLaz5vnfEXVO6mFkTM5tvZmPiriUdzGyVmS0yswVmlvp9DBPKzPYzsxFmtqzmv+cTI/28TFkqMbNOwCZgWKr3tkw6MzsYODiEMM/M9gbmAueHEJbEXFokzMyAZiGETWa2BzAN+E0IoTTm0iJnZjcABcA+IYRz4q4nama2CigIIeTEHLeZPQa8EkIoMrNvAE1DCO9H9XkZ03GHEKYC78VdRzqFENaGEObV/LwRWAocGm9V0QluU82ve9T8yYzOIUJmdhhwNlAUdy3S+MxsH6ATMBQghLAtytCGDAruXGdmeUA7YFa8lUSrZslgAbAOGB9CyOrvW6MfcDNQHXchaRSAl8xsrpl1j7uYiH0PqAQeqVkOKzKzZlF+oII7A5jZt4BngOtDCB/GXU+UQgg7QwjHAIcBHcwsq5fFzOwcYF0IYW7ctaRZxxDCscBZQI+apdBstTtwLPBgCKEdsBm4JcoPVHDHrGat9xmgOIQwMu560qXmfyUnA2fGXErUOgI/rVnzfQroYmb/iLek6IUQ3ql5XAeMAjrEW1Gk3gbe/sz/PY7AgzwyCu4Y1VysGwosDSHcG3c9UTOzFma2X83PewFdgWXxVhWtEELPEMJhIYQ84OfApBDCJTGXFSkza1ZzsZ2aJYPTgaydFgshvAu8ZWY/qnnqNCDSAYNa7zmZLum6KXGG6Qj8L7CoZt0X4PchhLEx1hSlg4HHzKwJ3jQMDyHkxHhcjjkQGOV9CbsDT4QQXoy3pMhdBxTXTJS8AVwR5YdlzDigiIikRkslIiIJo+AWEUkYBbeISMIouEVEEkbBLSKSMApuEZGEUXCLiCSMgltEJGH+H+yYASXcfDjCAAAAAElFTkSuQmCC\n", "text/plain": [ - "" + "
    " ] }, - "metadata": {}, + "metadata": { + "needs_background": "light" + }, "output_type": "display_data" } ], @@ -234,8 +227,8 @@ " print_progress=3)\n", "gd_lr.fit(X, y)\n", "\n", - "print('Intercept: %.2f' % gd_lr.w_)\n", - "print('Slope: %.2f' % gd_lr.b_)\n", + "print('Intercept: %.2f' % gd_lr.b_)\n", + "print('Slope: %.2f' % gd_lr.w_)\n", "\n", "def lin_regplot(X, y, model):\n", " plt.scatter(X, y, c='blue')\n", @@ -253,12 +246,14 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAk4AAAGGCAYAAACNCg6xAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAPYQAAD2EBqD+naQAAIABJREFUeJzt3X2YXWV57/HvDQyQKDOUBpNDi0VK5KXVSFKq2FZbkUZQ\nQU+lMEhFaH1BPbaxLa3XUVGs4qmXiaCkgrZFBOOhckRR2lDUek4lKSWBtJUQ3wK+YCAIBiSBQHKf\nP9baujPOJGsme2Y9O/P9XNe61uxnP2vte++HML951tprRWYiSZKkXdur7QIkSZL6hcFJkiSpIYOT\nJElSQwYnSZKkhgxOkiRJDRmcJEmSGjI4SZIkNWRwkiRJasjgJEmS1JDBSZIkqaFiglNEvDEi1kfE\nlohYGRHH7aTvyyPixoi4LyI2RcTNEfG7o/Q7LSLW1vtcExEnTe67kCRJe7IiglNEnA58ALgAOBZY\nAyyPiFljbPI84EbgJGA+8GXg+oiY17XP5wKfBD4KPAv4LHBdRBwzWe9DkiTt2aKEm/xGxErg3zLz\nj+vHAXwXuCQz/7rhPv4L+FRm/lX9+FPAzMw8pavPCuC2zHxDr9+DJEna87U+4xQRA8AC4IudtqzS\n3E3A8Q33EcABwANdzcfX++i2vOk+JUmSRmo9OAGzgL2Be0e03wvMabiPPweeBFzT1TZnN/cpSZK0\ng33aLmB3RcSZwNuBUzLz/t3c188DC4G7gEd3vzpJkjTF9gcOA5Zn5g97vfMSgtP9wDZg9oj22cCG\nnW0YEWcAlwOvyMwvj3h6wwT2uRC4elcFS5Kk4r2S6ktiPdV6cMrMxyNiFXAC8Dn4yTlLJwCXjLVd\nRAwDHwNOz8x/GqXLilH2cWLdPpa7AK666iqOPvrocbyLifnRj+CEE+D974cXvGDSX67vLFq0iCVL\nlrRdhmqOR1kcj7I4HuVYu3YtZ511FtS/03ut9eBUWwxcUQeoW4BFwEzgCoCIuAg4JDPPrh+fWT/3\nZuDfI6Izs7QlMx+qf74Y+JeIeAvwBWCY6iT01+ykjkcBjj76aObPn9+zNzeWhx+u1k99KkzBy/Wd\noaGhKRkHNeN4lMXxKIvjUaRJOeWmhJPDycxrgD8DLgRuA54JLMzMjXWXOcChXZu8huqE8kuBe7qW\nD3btcwVwJvBa4HbgvwOnZuYdk/pmxmFgoFpv3dpuHZIkqZlSZpzIzKXA0jGeO2fE499puM9rgWt3\nv7rJ0QlOjz/ebh2SJKmZImacpqu994a99nLGSZKkfmFwatm++zrjNJbh4eG2S1AXx6MsjkdZHI/p\nw+DUsoEBZ5zG4v+IyuJ4lMXxKIvjMX0YnFrmjJMkSf3D4NQyZ5wkSeofBqeWOeMkSVL/MDi1zBkn\nSZL6h8GpZQMDzjhJktQvDE4t23dfZ5wkSeoXBqeWOeMkSVL/MDi1zBknSZL6h8GpZc44SZLUPwxO\nLfNyBJIk9Q+DU8u8HIEkSf3D4NQyZ5wkSeofBqeWOeMkSVL/MDi1zBknSZL6h8GpZc44SZLUPwxO\nLXPGSZKk/mFwapkzTpIk9Q+DU8u8AKYkSf3D4NQyb7kiSVL/MDi1zBknSZL6h8GpZc44SZLUPwxO\nLXPGSZKk/mFwapkzTpIk9Q+DU8uccZIkqX8YnFrmBTAlSeofBqeWdWacMtuuRJIk7YrBqWX77lut\nn3ii3TokSdKuGZxaNjBQrT1BXJKk8hmcWtaZcfI8J0mSymdwapkzTpIk9Q+DU8s6wckZJ0mSymdw\nalnnUJ0zTpIklc/g1LL99qvWjz3Wbh2SJGnXDE4tO+CAav3ww+3WIUmSds3g1LLBwWr90EPt1iFJ\nknbN4NQyg5MkSf3D4NSyTnDatKndOiRJ0q4ZnFo2MAAzZjjjJElSPzA4FWBw0OAkSVI/MDgVwOAk\nSVJ/MDgVYGjIc5wkSeoHBqcCOOMkSVJ/MDgVwOAkSVJ/MDgVwOAkSVJ/MDgVwOAkSVJ/MDgVwJPD\nJUnqDwanAjjjJElSfzA4FaATnDLbrkSSJO2MwakAg4OwbRts2dJ2JZIkaWcMTgUYGqrWnuckSVLZ\nDE4FGBys1p7nJElS2QxOBTA4SZLUHwxOBTA4SZLUHwxOBegEJ89xkiSpbAanAjjjJElSfzA4FWBg\nAGbMMDhJklQ6g1MhvHq4JEnlMzgVwuAkSVL5DE6F8Ea/kiSVz+BUCGecJEkqn8GpEAYnSZLKZ3Aq\nhMFJkqTyGZwK4TlOkiSVz+BUCGecJEkqn8GpEAYnSZLKZ3AqRCc4ZbZdiSRJGovBqRCDg7BtG2ze\n3HYlkiRpLMUEp4h4Y0Ssj4gtEbEyIo7bSd85EXF1RKyLiG0RsXiUPmdHxPb6+e31UmwsGRqq1h6u\nkySpXEUEp4g4HfgAcAFwLLAGWB4Rs8bYZD/gPuDdwO072fUmYE7X8ku9qrnXBgertcFJkqRyFRGc\ngEXAZZl5ZWbeCbwe2AycO1rnzLw7Mxdl5lXAzqJGZubGzLyvXjb2vvTeMDhJklS+1oNTRAwAC4Av\ndtoyM4GbgON3c/dPjoi7IuI7EXFdRByzm/ubNAYnSZLK13pwAmYBewP3jmi/l+rw2kSto5qxOgV4\nJdV7vTkiDtmNfU6azjlOXgRTkqRy7dN2AZMlM1cCKzuPI2IFsBZ4HdW5VGNatGgRQ50kUxseHmZ4\neHgSKq0ccEC1dsZJkqRmli1bxrJly3Zo2zTJMxAlBKf7gW3A7BHts4ENvXqRzHwiIm4DjthV3yVL\nljB//vxevXQjAwMwY4bBSZKkpkab1Fi9ejULFiyYtNds/VBdZj4OrAJO6LRFRNSPb+7V60TEXsAz\ngB/0ap+95tXDJUkqWwkzTgCLgSsiYhVwC9W37GYCVwBExEXAIZl5dmeDiJgHBPBk4OD68dbMXFs/\n/3aqQ3XfBA4EzgeeCnxsit7TuA0Oeo6TJEklKyI4ZeY19TWbLqQ6RHc7sLDr8gFzgENHbHYb0LlB\nyXzgTOBu4PC67eeAy+ttH6Sa1
Tq+vtxBkYaGnHGSJKlkRQQngMxcCiwd47lzRmnb6WHGzHwL8Jbe\nVDc1PFQnSVLZWj/HST9lcJIkqWwGp4IYnCRJKpvBqSBDQ54cLklSyQxOBXHGSZKkshmcCmJwkiSp\nbAangnSCU+au+0qSpKlncCrI0BBs2wabN7ddiSRJGo3BqSCDg9Xaw3WSJJXJ4FQQg5MkSWUzOBXE\n4CRJUtkMTgXpBCev5SRJUpkMTgUZGqrWzjhJklQmg1NBDjigWhucJEkqk8GpIAMDMGOGwUmSpFIZ\nnArj1cMlSSqXwakw3uhXkqRyGZwK44yTJEnlMjgVxuAkSVK5DE6FMThJklQug1NhBgc9x0mSpFIZ\nnAozNOSMkyRJpTI4FcZDdZIklcvgVBiDkyRJ5TI4FaYTnDLbrkSSJI1kcCrM0BBs2wabN7ddiSRJ\nGsngVJjBwWrt4TpJkspjcCqMwUmSpHIZnApjcJIkqVwGp8IMDVVrL4IpSVJ5DE6FccZJkqRyGZwK\nc8AB1drgJElSeQxOhRkYgBkzDE6SJJXI4FQgb/QrSVKZDE4F8ka/kiSVyeBUIO9XJ0lSmQxOBTI4\nSZJUJoNTgTzHSZKkMhmcCuQ5TpIklcngVCAP1UmSVCaDU4EMTpIklcngVCCDkyRJZTI4FagTnDLb\nrkSSJHUzOBVoaAi2bYPNm9uuRJIkdTM4FWhwsFp7uE6SpLIYnAr08z9fre+7r906JEnSjgxOBfrl\nX67W3/xmu3VIkqQdGZwKNGtWdZ7TN77RdiWSJKmbwalAETB3rsFJkqTSGJwKZXCSJKk8BqdCGZwk\nSSqPwalQc+fChg3w8MNtVyJJkjoMToWaO7da+806SZLKYXAqVCc4ebhOkqRyGJwKddBB1WJwkiSp\nHAangnmCuCRJZTE4FczgJElSWQxOBTM4SZJUFoNTwebOhY0bYdOmtiuRJElgcCqa36yTJKksBqeC\nGZwkSSqLwalgQ0Nw8MEGJ0mSSmFwKpwniEuSVI4JBaeIeEdEzBylfUZEvGP3y1KHwUmSpHJMdMbp\nAuDJo7TPrJ9TjxicJEkqx0SDUwA5Svs84IGJl6OR5s6FBx6oFkmS1K59xtM5Ih6kCkwJfD0iusPT\n3lSzUB/pXXnq/mbds5/dbi2SJE134wpOwJ9QzTb9HdUhue5LM24F7srMFT2qTcARR1Rrg5MkSe0b\nV3DKzI8DRMR64KuZ+cSkVKWfOOAAmDPH85wkSSrBRM9xehg4uvMgIk6NiOsi4r0RsW9vSlOHJ4hL\nklSGiQany4CnA0TE4cD/BjYDpwF/PZEdRsQbI2J9RGyJiJURcdxO+s6JiKsjYl1EbIuIxWP0Oy0i\n1tb7XBMRJ02ktrYZnCRJKsNEg9PTgdvrn08DvpKZZwKvBn5vvDuLiNOBD1CdN3UssAZYHhGzxthk\nP+A+4N1ddYzc53OBTwIfBZ4FfBa4LiKOGW99besEpxzte4ySJGnK7M7lCDrbvhC4of75u8BYYWdn\nFgGXZeaVmXkn8HqqGaxzR+ucmXdn5qLMvAp4aIx9vhn4x8xcnJnrMvMdwGrgTROor1Vz58KmTXD/\n/W1XIknS9DbR4HQr8LaI+APg+cAX6vanAfeOZ0cRMQAsAL7YacvMBG4Cjp9gfdTb3jSibflu7rMV\n3uxXkqQyTDQ4/QkwH/gw8J7M/Gbd/grg5nHuaxbVNaBGBq57gTkTrI96217vsxXdlySQJEntGe91\nnADIzP8AnjHKU38ObNutivQzZs6EX/gFg5MkSW2bUHDqiIgF/PSyBHdk5uoJ7OZ+qrA1e0T7bGDD\nbpS3YaL7XLRoEUNDQzu0DQ8PMzw8vBvl7B6/WSdJ0o6WLVvGsmXLdmjbtGnTGL17Y0LBKSKeQnUJ\ngucDP6qbD4yILwNnZObGpvvKzMcjYhVwAvC5ev9RP75kIvXVVoyyjxPr9p1asmQJ8+fP342X7r25\nc+HWW9uuQpKkcow2qbF69WoWLFgwaa850XOcPkR1X7pfycyDMvMg4FeBQSYWdhYDr4mIV0XEUVT3\nu5sJXAEQERdFxMe7N4iIeRHxrLqOg+vHR3d1uRh4UUS8JSKOjIh3Up2E/uEJ1Nc6L0kgSVL7Jnqo\n7kXACzNzbachM++IiDcCN453Z5l5TX3NpgupDqfdDizsmrmaAxw6YrPbqG42DNWJ6mcCdwOH1/tc\nERFnAu+pl28Ap2bmHeOtrwRz58KPfwz33FOd7yRJkqbeRIPTXsDjo7Q/zgRnsTJzKbB0jOfOGaVt\nl6+TmdcC106kntL8xm/APvvAtdfCm9/cdjWSJE1PEz1U9yXg4og4pNMQEb8ALKHrekzqnYMPhpe8\nBK64ou1KJEmaviYanN5EdT7TXRHxrYj4FrC+bvsfvSpOOzrnHLjtNlizpu1KJEmaniZ6HafvRsR8\nqtutHFU3r83MkVfqVg+ddFI18/Txj8PiUW9rLEmSJtO4Zpwi4gURcUdEDGblnzPzQ5n5IeDfI+Jr\nEbFwkmqd9gYG4Kyz4Kqr4PHRzjCTJEmTaryH6v4E+Ghm/syNdTNzE3AZHqqbVK9+NWzcCDfcsMuu\nkiSpx8YbnOYB/7ST528EnjnxcrQrz3wmHHusJ4lLktSG8Qan2Yx+GYKOJ4CDJ16OmjjnHPj856uZ\nJ0mSNHXGG5y+T3WF8LE8E/jBxMtRE8PDEAGf/GTblUiSNL2MNzjdALw7IvYf+UREzADeBXy+F4Vp\nbLNmwUtf6uE6SZKm2niD018BBwFfj4jzI+LUevkLYF393Ht6XaR+1qtfDbffXi2SJGlqjCs4Zea9\nwHOB/wIuAj5TL++t236z7qNJ9qIXwVOe4qyTJElTadxXDs/MuzPzZGAW8GzgOcCszDw5M9f3ukCN\nbmAA/uAP4Oqr4ZFH2q5GkqTpYaK3XCEzH8zMf8/MWzLzwV4WpWZe9zp49FE49VTYsqXtaiRJ2vNN\nODipfXPnwhe+ACtWwMtfXoUoSZI0eQxOfe55z4Prr4evfAVOOw22bm27IkmS9lwGpz3AC14A110H\nN94IZ5zhfewkSZosBqc9xMKFcO211RXFzzwTfuBlSCVJ6jmD0x7kJS+Ba66pbgB86KHVeU833ADb\ntrVdmSRJewaD0x7mZS+D738fLrkE1q+HF78YnvY0eNvb4DOfgbVrPQ9KkqSJ2qftAtR7Bx4Ib3gD\nnHce3HorXH45LF0KD9YXjdh7bzj88OpbeQcdVPXvLIODsN9+Oy777gv77FNtN3LZa6+fLhHV0v3z\neNsma5EkqRcMTnuwCDjuuGq5/HK47z64805Yt65af+tbcPfdsGYN/OhH1fLww21XPbl6GcQme9vR\n+o3ndZtu3+t6Smjr/FzqtlPRr9fb7mx/u7uffmgb2d7m/qZi26not7vbtsXgNE1EwOzZ1fL854/d\nb/v26lDe1q3w2GPVsnVrdZ7UE09U686yfTtkVuvO0nncve4sIx/3wwK97be7247Wd3dq7F
XbzsZ2\nV9t2fp7M+nr9uk23LaWftCeb6lBlcNIO9toL9t+/WiTtWSYaznbWNtFtJrptk7aR7ZPxGlNRy1Rs\nW1K/Xm17991w0UVMGoOTJE0TJR3ukCbL6tWTG5z8Vp0kSVJDBidJkqSGDE6SJEkNGZwkSZIaMjhJ\nkiQ1ZHCSJElqyOAkSZLUkMFJkiSpIYOTJElSQwYnSZKkhgxOkiRJDRmcJEmSGjI4SZIkNWRwkiRJ\nasjgJEmS1JDBSZIkqSGDkyRJUkMGJ0mSpIYMTpIkSQ0ZnCRJkhoyOEmSJDVkcJIkSWrI4CRJktSQ\nwUmSJKkhg5MkSVJDBidJkqSGDE6SJEkNGZwkSZIaMjhJkiQ1ZHCSJElqyOAkSZLUkMFJkiSpIYOT\nJElSQwYnSZKkhgxOkiRJDRmcJEmSGjI4SZIkNWRwkiRJasjgJEmS1JDBSZIkqSGDkyRJUkMGJ0mS\npIYMTpIkSQ0ZnCRJkhoyOEmSJDVkcJIkSWqomOAUEW+MiPURsSUiVkbEcbvo/9sRsSoiHo2Ir0fE\n2SOePzsitkfEtnq9PSI2T+67kCRJe7IiglNEnA58ALgAOBZYAyyPiFlj9D8M+DzwRWAecDHwsYg4\ncUTXTcCcruWXJqF8SZI0TRQRnIBFwGWZeWVm3gm8HtgMnDtG//OAb2fm+Zm5LjMvBT5d76dbZubG\nzLyvXjZO2juQJEl7vNaDU0QMAAuoZo+AKu0ANwHHj7HZc+rnuy0fpf+TI+KuiPhORFwXEcf0qGxJ\nkjQNtR6cgFnA3sC9I9rvpTq8Npo5Y/QfjIj96sfrqGasTgFeSfVeb46IQ3pRtCRJmn72abuAyZKZ\nK4GVnccRsQJYC7yO6lwqSZKkcSkhON0PbANmj2ifDWwYY5sNY/R/KDMfG22DzHwiIm4DjthVQYsW\nLWJoaGiHtuHhYYaHh3e1qSRJmiLLli1j2bJlO7Rt2rRpUl8zqtOJ2hURK4F/y8w/rh8H8B3gksx8\n/yj93weclJnzuto+CRyYmSeP8Rp7AV8DvpCZfzZGn/nAqlWrVjF//vzdfVuSJGmKrV69mgULFgAs\nyMzVvd5/Cec4ASwGXhMRr4qIo4CPADOBKwAi4qKI+HhX/48Ah0fE/4qIIyPiDcAr6v1Qb/P2iDgx\nIp4WEccCVwNPBT42NW9JkiTtaUo4VEdmXlNfs+lCqkNutwMLuy4fMAc4tKv/XRHxYmAJ8Gbge8Af\nZmb3N+1+Dri83vZBYBVwfH25A0mSpHErIjgBZOZSYOkYz50zStv/pbqMwVj7ewvwlp4VKEmSpr1S\nDtVJkiQVz+AkSZLUkMFJkiSpIYOTJElSQwYnSZKkhgxOkiRJDRmcJEmSGjI4SZIkNWRwkiRJasjg\nJEmS1JDBSZIkqSGDkyRJUkMGJ0mSpIYMTpIkSQ0ZnCRJkhoyOEmSJDVkcJIkSWrI4CRJktSQwUmS\nJKkhg5MkSVJDBidJkqSGDE6SJEkNGZwkSZIaMjhJkiQ1ZHCSJElqyOAkSZLUkMFJkiSpIYOTJElS\nQwYnSZKkhgxOkiRJDRmcJEmSGjI4SZIkNWRwkiRJasjgJEmS1JDBSZIkqSGDkyRJUkMGJ0mSpIYM\nTpIkSQ0ZnCRJkhoyOEmSJDVkcJIkSWrI4CRJktSQwUmSJKkhg5MkSVJDBidJkqSGDE6SJEkNGZwk\nSZIaMjhJkiQ1ZHCSJElqyOAkSZLUkMFJkiSpIYOTJElSQwYnSZKkhgxOkiRJDRmcJEmSGjI4SZIk\nNWRwkiRJasjgJEmS1JDBSZIkqSGDkyRJUkMGJ0mSpIYMTpIkSQ0ZnCRJkhoyOEmSJDVkcJIkSWrI\n4CRJktSQwUmSJKkhg5MkSVJDBidJkqSGDE6SJEkNFROcIuKNEbE+IrZExMqIOG4X/X87IlZFxKMR\n8fWIOHuUPqdFxNp6n2si4qTJewfqtWXLlrVdgro4HmVxPMrieEwfRQSniDgd+ABwAXAssAZYHhGz\nxuh/GPB54IvAPOBi4GMRcWJXn+cCnwQ+CjwL+CxwXUQcM2lvRD3l/4jK4niUxfEoi+MxfRQRnIBF\nwGWZeWVm3gm8HtgMnDtG//OAb2fm+Zm5LjMvBT5d76fjzcA/Zubius87gNXAmybvbUiSpD1Z68Ep\nIgaABVSzRwBkZgI3AcePsdlz6ue7LR/R//gGfSRJkhprPTgBs4C9gXtHtN8LzBljmzlj9B+MiP12\n0WesfUqSJO3UPm0XUJj9AdauXdt2HQI2bdrE6tWr2y5DNcejLI5HWRyPcnT9Dt9/MvZfQnC6H9gG\nzB7RPhvYMMY2G8bo/1BmPraLPmPtE+AwgLPOOmvnFWvKLFiwoO0S1MXxKIvjURbHoziHATf3eqet\nB6fMfDwiVgEnAJ8DiIioH18yxmYrgJGXFvjdur27z8h9nDiiz0jLgVcCdwGPNnsHkiSpIPtThabl\nk7HzqM7DbldE/D5wBdW36W6h+nbcK4CjMnNjRFwEHJKZZ9f9DwP+E1gK/B1VQPogcHJm3lT3OR74\nF+CtwBeAYeAvgfmZeccUvTVJkrQHaX3GCSAzr6mv2XQh1eG024GFmbmx7jIHOLSr/10R8WJgCdVl\nB74H/GEnNNV9VkTEmcB76uUbwKmGJkmSNFFFzDhJkiT1gxIuRyBJktQXDE618d4rT7svIt4aEbdE\nxEMRcW9EfCYinj5Kvwsj4p6I2BwR/xwRR7RR73QTEX8ZEdsjYvGIdsdjikTEIRHxiYi4v/6810TE\n/BF9HI8pEBF7RcS7I+Lb9Wf9zYh42yj9HI9JEBG/FRGfi4jv1/9fOmWUPjv97CNiv4i4tP739HBE\nfDoinjLeWgxOjP9eeeqZ3wI+BDwbeCEwANwYETM6HSLiL6huk/Na4NeBR6jGZt+pL3f6qP9weC3V\nv4XudsdjikTEgcBXgceAhcDRwJ8CD3b1cTymzl8CrwPeABwFnA+cHxE/uY2X4zGpnkR1/vMbgJ85\nx6jhZ/9B4MXA7wHPAw4Brh13JZk57RdgJXBx1+OgOuH8/LZrm04L1VXktwO/2dV2D7Co6/EgsAX4\n/bbr3VMX4MnAOuAFwJeBxY5HK+PwPuAru+jjeEzdeFwPfHRE26eBKx2PKR+L7cApI9p2+tnXjx8D\nXt7V58h6X78+ntef9jNOE7xXnibHgVR/STwAEBFPo/pGZffYPAT8G47NZLoUuD4zv9Td6HhMuZcC\nt0bENfWh7NUR8UedJx2PKXczcEJEzAWIiHnAbwA31I8dj5Y0/Ox/jepKAt191gHfYZzjU8TlCFq2\ns3vlHTn15UxP9UVPPwj8a/70khFzqIKU9xycIhFxBvAsqv/JjOR4TK3DgfOoTiN4D9Xhh0si4rHM\n/ASOx1R7H9WsxZ0RsY3qVJf/mZmfqp93PNrT5LOfDWytA9VYfRoxOKkUS4FjqP6CUwsi4hepwusL\nM/PxtusRewG3ZObb68drIuJXqS4U/In2ypq2T
gfOBM4A7qD6A+PiiLinDrKaJqb9oTomdq889VBE\nfBg4GfjtzPxB11MbqM43c2ymxgLgYGB1RDweEY8Dzwf+OCK2Uv1l5nhMnR8AI+84vhZ4av2z/z6m\n1l8D78vMf8jMr2Xm1VQXYX5r/bzj0Z4mn/0GYN+IGNxJn0amfXCq/7Lu3CsP2OFeeT2/OaB2VIem\nU4HfyczvdD+Xmeup/oPuHptBqm/hOTa9dxPwDKq/pOfVy63AVcC8zPw2jsdU+io/e7rAkcDd4L+P\nFsyk+iO723bq36OOR3safvargCdG9DmS6g+Rnd3D9md4qK6yGLiivtlw5155M6nun6dJEhFLqe4h\neArwSER0/lrYlJmdmyx/EHhbRHyT6ubL76b6xuNnp7jcPV5mPkJ1COInIuIR4IeZ2Zn5cDymzhLg\nqxHxVuAaql8CfwS8pquP4zF1rqf6rL8HfA2YT/W74mNdfRyPSRIRTwKOoJpZAji8PkH/gcz8Lrv4\n7DPzoYj4W2BxRDwIPAxcAnw1M28ZVzFtf62wlIXq2hB3UX19cQXwa23XtKcvVH+tbRtledWIfu+k\n+qrpZqq7XR/Rdu3TZQG+RNflCByPKf/8Twb+o/6svwacO0ofx2NqxuJJVH9kr6e6RtA3gHcB+zge\nU/L5P3+M3xl/1/SzB/ajunbg/XVw+gfgKeOtxXvVSZIkNTTtz3GSJElqyuAkSZLUkMFJkiSpIYOT\nJElSQwYnSZKkhgxOkiRJDRmcJEmSGjI4SZIkNWRwkiRJasjgJEmjiIjtEXFK23VIKovBSVJxIuLv\n6+CyrV53fr6h7dokTW/7tF2AJI3hH4FX89O7oQM81k4pklRxxklSqR7LzI2ZeV/Xsgl+chjt9RFx\nQ0RsjohvRcTvdW8cEb8aEV+sn78/Ii6LiCeN6HNuRPxXRDwaEd+PiEtG1HBwRPyfiHgkIr4eES/t\n2vbAiLg6Iu6rX2NdRJw9aZ+GpCIYnCT1qwuBfwCeCVwNfCoijgSIiJnAcuCHwALgFcALgQ91No6I\n84APAx8BfgV4MfD1Ea/xDuBTwDOAG4CrI+LA+rm/Ao4CFtbr84D7e/0mJZUlMrPtGiRpBxHx98BZ\nwKNdzQk646D2AAACD0lEQVS8NzPfFxHbgaWZ+aaubVYAqzLzTRHxGuAi4Bcz89H6+ZOA64H/lpkb\nI+J7wN9m5gVj1LAduDAz31k/ngn8GHhRZt4YEZ8FNmbmH/X23Usqmec4SSrVl4DXs+M5Tg90/bxy\nRP8VwLz656OANZ3QVPsq1Sz7kREBcEj9Gjvzn50fMnNzRDwEPKVu+hvg2ohYANwIXJeZK3b1piT1\nN4OTpFI9kpnrJ2nfWxr2e3zE46Q+xSEz/ykingqcDJwI3BQRl2bm+b0rU1JpPMdJUr96ziiP19Y/\nrwXmRcSMrud/E9gG3JmZPwbuAk7YnQIy84eZ+YnMfBWwCHjt7uxPUvmccZJUqv0iYvaIticy84f1\nz6dFxCrgX6nOhzoOOLd+7mrgncDHI+JdVIfXLgGuzMzOCdzvBP4mIjZSXfpgEHhuZn64SXH1flcB\nXwP2B14C3DHeNympvxicJJXqRcA9I9rWAcfUP18AnAFcCvwAOCMz7wTIzC0RsRC4GLgF2Ax8GvjT\nzo4y88qI2I9qpuj9VN+I+3TXa432zZnsat8KvBc4jOrQ3/8DhifwPiX1Eb9VJ6nv1N94e1lmfq7t\nWiRNL57jJEmS1JDBSVI/cqpcUis8VCdJktSQM06SJEkNGZwkSZIaMjhJkiQ1ZHCSJElqyOAkSZLU\nkMFJkiSpIYOTJElSQwYnSZKkhgxOkiRJDf1/9YCmkE49IDYAAAAASUVORK5CYII=\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAagAAAEYCAYAAAAJeGK1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4xLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvDW2N/gAAIABJREFUeJzt3X+UHWWd5/H3p3+k88sEElqF/CBxyCwTlR/SRNSVdfBXWJWwMyBhUdBlNytnctRxdYR11WNGzyzHPaKcYRmiIKBIRFY0q2iGAWR3ZgDTgUgIbKSJGWgSoTEBgmCSTr77Rz2drr65nb43dN1b6f68Dvd01VNVTz1Vfcmnq+5zn1JEYGZmVjYtzW6AmZlZNQ4oMzMrJQeUmZmVkgPKzMxKyQFlZmal5IAyM7NSKjSgJC2WtElSj6RLqyz/lKRHJD0k6U5Jx+aWXSTpsfS6KFd+iqQNqc4rJanIYzAzs+ZQUd+DktQK/Bp4N9ALrAXOj4hHcuv8KXB/RLwk6RLgHRFxnqQZQDfQBQSwDjglInZI+iXwCeA+4Hbgyoj4WSEHYWZmTVPkFdQioCciNkfEbmAVsCS/QkTcHREvpdn7gNlp+r3AHRGxPSJ2AHcAiyUdDUyLiHsjS9YbgbMLPAYzM2uStgLrngU8mZvvBd58kPUvBgauhKptOyu9equUH0DSMmAZwJQpU045/vjj62n7qNn09E4mtbcyd8bkpuzfzKxs1q1b92xEdI60XpEBVe2zoar3EyV9iOx23r8ZYdua64yIlcBKgK6uruju7h6pvYV419fuYcGrp3L1h05pyv7NzMpG0r/Usl6Rt/h6gTm5+dnA1sqVJL0L+BxwVkTsGmHbXgZvAw5bZ5m0tYg9ez3eoZlZvYoMqLXAAknzJU0AlgKr8ytIOhm4hiycnsktWgO8R9KRko4E3gOsiYhtwE5Jp6XeexcCPy7wGF6x9tYW+vfta3YzzMwOO4Xd4ouIfknLycKmFbguIjZKWgF0R8Rq4KvAVOAHqbf4ExFxVkRsl/TXZCEHsCIitqfpS4DrgUlkn1mVugdfW6vo9xWUmVndivwMioi4nawreL7sC7npdx1k2+uA66qUdwNvGMVmFqq9pYU9e30FZWZWL48kUbC2VtG/z1dQZmb1ckAVrK21hX5fQZmZ1c0BVbB29+IzMzskDqiCZbf4fAVlZlYvB1TBslt8voIyM6uXA6pg7S1ij6+gzMzq5oAqmK+gzMwOjQOqYO2t7iRhZnYoHFAFa2vxUEdmZofCAVUwD3VkZnZoHFAFa2/1UEdmZofCAVWwthYPdWRmdigcUAVra21h774ge0K9mZnVygFVsPaW7CHA7slnZlYfB1TB2lqzU+yefGZm9XFAFay91VdQZmaHwgFVsLZ0i8+P3DAzq48DqmCDt/h8BWVmVg8HVMEGb/H5CsrMrB4OqIK1taQrKH8GZWZWl0IDStJiSZsk9Ui6tMry0yU9IKlf0jm58j+VtD73+oOks9Oy6yX9JrfspCKP4ZVqS1dQ7sVnZlaftqIqltQKXAW8G+gF1kpaHRGP5FZ7AvgI8On8thFxN3BSqmcG0AP8fW6Vz0TErUW1fTS1p8+g3IvPzKw+hQUUsAjoiYjNAJJWAUuA/QEVEVvSsoNdXpwD/CwiXiquqcUZ7MXngDIzq0eRt/hmAU/m5ntTWb2WAjdXlH1F0kOSrpDUUW0jScskdUvq7uvrO4Tdjo72tuwU73YnCTOzuhQZUKpSVtdlhKSjgTcCa3LFlwHHA6cCM4DPVts2IlZGRFdEdHV2dtaz21E1ub0VgJd3721aG8zMDkdFBlQvMCc3PxvYWmcdHwRui4g9AwURsS0yu4Bvk91KLK0pHdld1Bd39Te5JWZmh5ciA2otsEDSfEkTyG7Vra6zjvOpuL2XrqqQJOBs4OFRaGthpqaA+r0DysysLoUFVET0A8vJbs89CtwSERslrZB0FoCkUyX1AucC10jaOLC9pHlkV2D3VFR9k6QNwAbgKODLRR3DaJg6MQXUbgeUmVk9iuzFR0TcDtxeUfaF3PRaslt/1bbdQpVOFRFxxui2slhTfYvPzOyQeCSJgnW0tdDaIt/iMzOrkwOqYJKYMqGV3+9yLz4zs3o4oBpgakebb/GZmdXJAdUAUzrafIvPzKxODqgGmOIrKDOzujmgGmCqr6DMzOrmgGqAKR3uJGFmVi8HVAP4Fp+ZWf0cUA3gXnxmZvVzQDXAQC++CD8TysysVg6oBpja0Ub/vmBXv58JZWZWKwdUA0yZkD0Tyj35zMxq54BqgCn7H7nhnnxmZrVyQDWARzQ3M6ufA6oB9l9B+ZlQZmY1c0A1gB/7bmZWPwdUA/ix72Zm9XNANcCUDvfiMzOrlwOqAQY7SbgXn5lZrRxQDTDFt/jMzOpWaEBJWixpk6QeSZdWWX66pAck9Us6p2LZXknr02t1rny+pPslPSbp+5ImFHkMo6G9tYUJbS0OKDOzOhQWUJJagauAM4GFwPmSFlas9gTwEeB7Vap4OSJOSq+zcuWXA1dExAJgB3DxqDe+AB4w1sysPkVeQS0CeiJic0TsBlYBS/IrRMSWiHgIqGmQOkkCzgBuTUU3AGePXpOLkz0TygFlZlarIgNqFvBkbr43ldVqoqRuSfdJGgihmcBzETHwL/2wdUpalrbv7uvrq7fto27KhDZ3kjAzq0NbgXWrSlk9z5uYGxFbJb0OuEvSBuCFWuuMiJXASoCurq6mP+fCj303M6tPkVdQvcCc3PxsYGutG0fE1vRzM/AL4GTgWeAISQPBWledzTSlo81DHZmZ1aHIgFoLLEi97iYAS4HVI2wDgKQjJXWk6aOAtwGPRPbEv7uBgR5/FwE/HvWWF8CdJMzM6lNYQKXPiZYDa4BHgVsiYqOkFZLOApB0qqRe4FzgGkkb0+Z/AnRL+hVZIP33iHgkLfss8ClJPWSfSV1b1DGMJneSMDOrT5GfQRERtwO3V5R9ITe9luw2XeV2/wy8cZg6N5P1EDysZI99dycJM7NaeSSJBpmaPoPK7lKamdlIHFANMrWjjQh4abevoszMauGAahCPx2dmVh8HVIP4se9mZvVxQDXI4BWUb/GZmdXCAdUgAw8t9BWUmVltHFAN4se+m5nVxwHVIPtv8Xm4IzOzmjigGsSdJMzM6uOAapCBK6gX/+CAMjOrhQOqQSa3Z50k/BmUmVltHFAN0tIipkxo9UMLzcxq5IBqoCl+aKGZWc0cUA00taONF92Lz8ysJg6oBvIVlJlZ7RxQDeSHFpqZ1c4B1UDZY9/dScLMrBYOqAbyLT4zs9o5oBrIAWVmVjsHVANlt/gcUGZmtSg0oCQtlrRJUo+kS6ssP13SA5L6JZ2TKz9J0r2SNkp6SNJ5uWXXS/qNpPXpdVKRxzCapkxoY1f/Pvr37mt2U8zMSq+tqIoltQJXAe8GeoG1klZHxCO51Z4APgJ8umLzl4ALI+IxSccA6yStiYjn0vLPRMStRbW9KAPPhPr9rr1Mn+yLVzOzgyksoIBFQE9EbAaQtA
pYAuwPqIjYkpYNuaSIiF/nprdKegboBJ7jMLZ/RPPd/Uyf3N7k1piZlVuRf8bPAp7MzfemsrpIWgRMAB7PFX8l3fq7QlLHMNstk9Qtqbuvr6/e3RZiih9aaGZWsyIDSlXKoq4KpKOB7wAfjYiBq6zLgOOBU4EZwGerbRsRKyOiKyK6Ojs769ltYfxMKDOz2hUZUL3AnNz8bGBrrRtLmgb8FPhvEXHfQHlEbIvMLuDbZLcSDwu+gjIzq12RAbUWWCBpvqQJwFJgdS0bpvVvA26MiB9ULDs6/RRwNvDwqLa6QIOdJBxQZmYjKSygIqIfWA6sAR4FbomIjZJWSDoLQNKpknqBc4FrJG1Mm38QOB34SJXu5DdJ2gBsAI4CvlzUMYy2wVt8Hu7IzGwkRfbiIyJuB26vKPtCbnot2a2/yu2+C3x3mDrPGOVmNoxv8ZmZ1c5fxmkgd5IwM6udA6qBOtpamNrRRt/OXc1uiplZ6TmgGkgSs4+cxJPbX2p2U8zMSs8B1WBzZ0zmCQeUmdmIHFANNhBQEXV9Z9nMbNxxQDXY3JmT2dW/z59DmZmNwAHVYHNmTAbwbT4zsxE4oBpsrgPKzKwmDqgGm3XEJCQHlJnZSBxQDTaxvZXXTpvogDIzG4EDqgnmzJjs70KZmY3AAdUE/i6UmdnIHFBNMHfGZJ5+YRd/2ONRzc3MhuOAaoKBnny9O3wVZWY2HAdUE/i7UGZmI6spoCR9p5Yyq82cGZMAeOJ3Digzs+HUegX1+vyMpFbglNFvzvjQObWDie0tPLH95WY3xcystA4aUJIuk7QTOEHSC+m1E3gG+HFDWjgGSXJPPjOzERw0oCLibyLiVcBXI2Jaer0qImZGxGUNauOYNHfGZHeSMDM7iFpv8f1E0hQASR+S9DVJxxbYrjFvjh+7YWZ2ULUG1NXAS5JOBP4K+BfgxpE2krRY0iZJPZIurbL8dEkPSOqXdE7FsoskPZZeF+XKT5G0IdV5pSTVeAylMnfGZF7avZff/X53s5tiZlZKtQZUf2R/6i8BvhER3wBedbANUkeKq4AzgYXA+ZIWVqz2BPAR4HsV284Avgi8GVgEfFHSkWnx1cAyYEF6La7xGErFo5qbmR1crQG1U9JlwIeBn6bwaR9hm0VAT0RsjojdwCqygNsvIrZExEPAvopt3wvcERHbI2IHcAewWNLRwLSIuDcF5o3A2TUeQ6kMBJTH5DMzq67WgDoP2AX8h4j4LTAL+OoI28wCnszN96ayWgy37aw0PWKdkpZJ6pbU3dfXV+NuG2f2kekKyt+FMjOrqqaASqF0EzBd0vuBP0TESJ9BVftsqNYeAcNtW3OdEbEyIroioquzs7PG3TbOpAmtvPpVHb7FZ2Y2jFpHkvgg8EvgXOCDwP2VnRqq6AXm5OZnA1trbNdw2/am6UOps3TmzpjM5md/3+xmmJmVUq23+D4HnBoRF0XEhWSfL31+hG3WAgskzZc0AVgKrK5xf2uA90g6MnWOeA+wJiK2kX0edlrqvXchh/EXhk973UwefGIHfTt3NbspZmalU2tAtUTEM7n53420bUT0A8vJwuZR4JaI2ChphaSzACSdKqmX7MrsGkkb07bbgb8mC7m1wIpUBnAJ8C2gB3gc+FmNx1A6HzjxGPYF/Ozhbc1uiplZ6aiWL4pK+ipwAnBzKjoPeCgiPltg20ZNV1dXdHd3N7sZVb3ninuYPqmdH3zsrc1uiplZQ0haFxFdI6030lh8x0l6W0R8BriGLKROBO4FVo5KS8e5D5xwDGu37GDrcx441swsb6RbfF8HdgJExA8j4lMR8ZfA7WmZvUIfOPEYAH7y0GHb18PMrBAjBdS89EXaISKiG5hXSIvGmXlHTeGE2dP537/y51BmZnkjBdTEgyybNJoNGc8+cMIxbHjqeX7jLudmZvuNFFBrJf2nykJJFwPrimnS+PO+E44G4Ce/8m0+M7MBbSMs/yRwm6QLGAykLmAC8O+KbNh4cswRkzh13pGs/tVWlp9xHIfpAO1mZqNqpO8yPR0RbwW+BGxJry9FxFvS8Ec2Ss49ZQ6PPfMiX/+Hx5rdFDOzUhjpCgqAiLgbuLvgtoxr53bN5pdbtvONOx/jtdMncv6iuc1ukplZU9UUUFY8SfzNn72RZ1/cxedu20Dn1A7etfA1zW6WmVnT1DrUkTVAe2sLV/37N/GGWdNZfvMD/N09j/PcS37irpmNTzUNdXS4K/NQR9U8++IuPrlqPf/Y8ywT21v4szfNZvHrX8sfv+ZVvGZahztRmNlhrdahjhxQJfbothe4/p+28KP1T7GrP3vo8LSJbRw7cwpHTG5n2qR2pk1sZ2J7Cx1trUxoa6G9RbS0iNYW0apsukXQIiFltxIF2TTZsoHp9N/QddK25MoH6gJy66nqtsPWS2pPqiQ/L+WnsxV0kDoG5MtaRqhjSNtz9VQ7R4N1V29rZV0DBSO1ubL+wXry59B/jNjY44DKOVwDasDzL+/hka0v8Ound/Lrp3fSu+Nlnn95Dy+8vIcX/rCHXXv2sWvvPnanELOxZ39AMjTAoPKPgMGyaqFHZbBWhCj766gIZoYGbL4tQ9pYLdTz+6tow9B9HvhHR+XxDnd85Os/yL6HnLNcW6q1odofQfn25LcZuu8D93ew8zlk3xV/5AzWc+Dx5WuuXE61uoccw9BjHVpnxe+0yjanHHskZ59c6wPSD1RrQLmTxGFg+qR23vJHM3nLH8086HoRwd59wd4I+vcG+yLYF4PlAUSaH5jet386GPhbJQKCSD+zZfsCSGX7BtYbWCeGmWZwX+T2Ua0dQbYghtl/2n3FfrJlaVFq/+B2+yIOXK9q24fZx/52V5Tl2k1u28G6h+5v6LEMzud/b4PrDr+/yvryxz1w/qrvf/j9Dtk+t/7gucqVV9l3vk0M2TYq6ql+nocsr1aWa0fV81uxbv58Dq1n6Pv7gLKB8oBgX9X3V+X5yZ/MA4+12v6Gb1tl3UP2e7DjG66e3O+52jaD+zvwfA9tX/Xlba16RQFVKwfUGCKJtlbRBnT4N2tmhzn34jMzs1JyQJmZWSk5oMzMrJQcUGZmVkoOKDMzK6VCA0rSYkmbJPVIurTK8g5J30/L75c0L5VfIGl97rVP0klp2S9SnQPLXl3kMZiZWXMUFlCSWoGrgDOBhcD5khZWrHYxsCMijgOuAC4HiIibIuKkiDgJ+DCwJSLW57a7YGB5RDxT1DGYmVnzFHkFtQjoiYjNEbEbWAUsqVhnCXBDmr4VeKcOHNvlfODmAttpZmYlVGRAzQKezM33prKq60REP/A8UDlcwnkcGFDfTrf3Pl8l0ACQtExSt6Tuvr6+Qz0GMzNrkiIDqlpwRD3rSHoz8FJEPJxbfkFEvBF4e3p9uNrOI2JlRHRFRFdnZ2d9LTczs6YrMqB6gTm5+dnA1uHWkdQGTAe255YvpeLqKSKeSj93At8ju5VoZmZjTJEBtRZYIGm+pAlkYbO6Yp3VwEVp+hzgrkijGkpqAc4l++yKVNYm6ag03Q68H3gYMzMbcwobUjQi+iUtB9YArcB1E
bFR0gqgOyJWA9cC35HUQ3bltDRXxelAb0RszpV1AGtSOLUC/wB8s6hjMDOz5vHzoMzMrKFqfR6UR5IwM7NSckCZmVkpOaDMzKyUHFBmZlZKDigzMyslB5SZmZWSA8rMzErJAWVmZqXkgDIzs1JyQJmZWSk5oMzMrJQcUGZmVkoOKDMzKyUHlJmZlZIDyszMSskBZWZmpeSAMjOzUnJAmZlZKTmgzMyslBxQZmZWSoUGlKTFkjZJ6pF0aZXlHZK+n5bfL2leKp8n6WVJ69Pr73LbnCJpQ9rmSkkq8hjMzKw5CgsoSa3AVcCZwELgfEkLK1a7GNgREccBVwCX55Y9HhEnpdfHcuVXA8uABem1uKhjMDOz5inyCmoR0BMRmyNiN7AKWFKxzhLghjR9K/DOg10RSToamBYR90ZEADcCZ49+083MrNmKDKhZwJO5+d5UVnWdiOgHngdmpmXzJT0o6R5Jb8+t3ztCnQBIWiapW1J3X1/fKzsSMzNruCIDqtqVUNS4zjZgbkScDHwK+J6kaTXWmRVGrIyIrojo6uzsrKPZZmZWBkUGVC8wJzc/G9g63DqS2oDpwPaI2BURvwOIiHXA48Afp/Vnj1CnmZmNAUUG1FpggaT5kiYAS4HVFeusBi5K0+cAd0VESOpMnSyQ9DqyzhCbI2IbsFPSaemzqguBHxd4DGZm1iRtRVUcEf2SlgNrgFbguojYKGkF0B0Rq4Frge9I6gG2k4UYwOnACkn9wF7gYxGxPS27BLgemAT8LL3MzGyMUdYZbmzr6uqK7u7uZjfDzMwASesiomuk9TyShJmZlZIDyszMSskBZWZmpeSAMjOzUnJAmZlZKTmgzMyslBxQZmZWSg4oMzMrJQeUmZmVkgPKzMxKyQFlZmal5IAyM7NSckCZmVkpOaDMzKyUHFBmZlZKDigzMyslB5SZmZWSA8rMzErJAWVmZqXkgDIzs1IqNKAkLZa0SVKPpEurLO+Q9P20/H5J81L5uyWtk7Qh/Twjt80vUp3r0+vVRR6DmZk1R1tRFUtqBa4C3g30AmslrY6IR3KrXQzsiIjjJC0FLgfOA54FPhARWyW9AVgDzMptd0FEdBfVdjMza74ir6AWAT0RsTkidgOrgCUV6ywBbkjTtwLvlKSIeDAitqbyjcBESR0FttXMzEqmyICaBTyZm+9l6FXQkHUioh94HphZsc6fAw9GxK5c2bfT7b3PS1K1nUtaJqlbUndfX98rOQ4zM2uCIgOqWnBEPetIej3Zbb//nFt+QUS8EXh7en242s4jYmVEdEVEV2dnZ10NNzOz5isyoHqBObn52cDW4daR1AZMB7an+dnAbcCFEfH4wAYR8VT6uRP4HtmtRDMzG2OKDKi1wAJJ8yVNAJYCqyvWWQ1clKbPAe6KiJB0BPBT4LKI+KeBlSW1SToqTbcD7wceLvAYzMysSQoLqPSZ0nKyHniPArdExEZJKySdlVa7FpgpqQf4FDDQFX05cBzw+Yru5B3AGkkPAeuBp4BvFnUMZmbWPIqo/Fho7Onq6orubvdKNzMrA0nrIqJrpPU8koSZmZWSA8rMzErJAWVmZqXkgDIzs1JyQJmZWSk5oMzMrJQcUGZmVkoOKDMzKyUHlJmZlZIDyszMSskBZWZmpeSAMjOzUnJAmZlZKTmgzMyslBxQZmZWSg4oMzMrJQeUmZmVkgPKzMxKyQFlZmal5IAyM7NSKjSgJC2WtElSj6RLqyzvkPT9tPx+SfNyyy5L5ZskvbfWOs3MbGwoLKAktQJXAWcCC4HzJS2sWO1iYEdEHAdcAVyetl0ILAVeDywG/qek1hrrNDOzMaDIK6hFQE9EbI6I3cAqYEnFOkuAG9L0rcA7JSmVr4qIXRHxG6An1VdLnWZmNga0FVj3LODJ3Hwv8Obh1omIfknPAzNT+X0V285K0yPVCYCkZcCyNPuipE11tv8o4Nk6txlrfA4yPg8ZnwefgwGv9DwcW8tKRQaUqpRFjesMV17tiq+yzqwwYiWw8mANPBhJ3RHRdajbjwU+Bxmfh4zPg8/BgEadhyJv8fUCc3Lzs4Gtw60jqQ2YDmw/yLa11GlmZmNAkQG1Flggab6kCWSdHlZXrLMauChNnwPcFRGRypemXn7zgQXAL2us08zMxoDCbvGlz5SWA2uAVuC6iNgoaQXQHRGrgWuB70jqIbtyWpq23SjpFuARoB/4i4jYC1CtzoIO4ZBvD44hPgcZn4eMz4PPwYCGnAdlFyxmZmbl4pEkzMyslBxQZmZWSg6oCuN1KCVJcyTdLelRSRslfSKVz5B0h6TH0s8jm93WoqVRSx6U9JM0Pz8NxfVYGpprQrPbWDRJR0i6VdL/S++Jt4zT98Jfpv8fHpZ0s6SJ4+H9IOk6Sc9IejhXVvX3r8yV6d/MhyS9abTa4YDKGedDKfUD/yUi/gQ4DfiLdOyXAndGxALgzjQ/1n0CeDQ3fzlwRToHO8iG6BrrvgH8PCKOB04kOx/j6r0gaRbwcaArIt5A1jFrKePj/XA92TBzecP9/s8k62m9gGxwhKtHqxEOqKHG7VBKEbEtIh5I0zvJ/kGaxdDhqG4Azm5OCxtD0mzgfcC30ryAM8iG4oLxcQ6mAaeT9bIlInZHxHOMs/dC0gZMSt/TnAxsYxy8HyLi/5D1rM4b7ve/BLgxMvcBR0g6ejTa4YAaqtrwTLOGWXfMSqPKnwzcD7wmIrZBFmLAq5vXsob4OvBXwL40PxN4LiL60/x4eE+8DugDvp1udX5L0hTG2XshIp4C/gfwBFkwPQ+sY/y9HwYM9/sv7N9NB9RQtQzPNKZJmgr8L+CTEfFCs9vTSJLeDzwTEevyxVVWHevviTbgTcDVEXEy8HvG+O28atJnLEuA+cAxwBSy21mVxvr7YSSF/T/igBpqXA+lJKmdLJxuiogfpuKnBy7X089nmtW+BngbcJakLWS3d88gu6I6It3igfHxnugFeiPi/jR/K1lgjaf3AsC7gN9ERF9E7AF+CLyV8fd+GDDc77+wfzcdUEON26GU0mct1wKPRsTXcovyw1FdBPy40W1rlIi4LCJmR8Q8st/9XRFxAXA32VBcMMbPAUBE/BZ4UtK/SkXvJBvVZdy8F5IngNMkTU7/fwych3H1fsgZ7ve/Grgw9eY7DXh+4FbgK+WRJCpI+rdkfzUPDKX0lSY3qSEk/Wvg/wIbGPz85b+SfQ51CzCX7H/YcyOi8sPTMUfSO4BPR8T7Jb2O7IpqBvAg8KGI2NXM9hVN0klkHUUmAJuBj5L9QTuu3guSvgScR9bL9UHgP5J9vjKm3w+SbgbeQfZYjaeBLwI/osrvP4X335L1+nsJ+GhEdI9KOxxQZmZWRr7FZ2ZmpeSAMjOzUnJAmZlZKTmgzMyslBxQZmZWSg4oswaRtFfS+txr1EZnkDQvP/K02VhQ2CPfzewAL0fESc1uhNnhwldQZk0maYukyyX9Mr2OS+XHSrozPWPnTklzU/lrJN0m6Vfp9dZUVaukb6bnF/29pElp/Y9LeiTVs6pJh2lWNweUWeNMqrjFd15u2QsRsYjsG/lf
T2V/S/YYgxOAm4ArU/mVwD0RcSLZGHkbU/kC4KqIeD3wHPDnqfxS4ORUz8eKOjiz0eaRJMwaRNKLETG1SvkW4IyI2JwG7P1tRMyU9CxwdETsSeXbIuIoSX3A7PzwOukRKXekh8kh6bNAe0R8WdLPgRfJhqr5UUS8WPChmo0KX0GZlUMMMz3cOtXkx4Pby+BnzO8je1L0KcC63EjcZqXmgDIrh/NyP+9N0/9MNqo6wAXAP6bpO4FLACS1pifgViWpBZgTEXeTPYjxCOCAqzizMvJfUmaNM0nS+tz8zyNioKt5h6T7yf5oPD+VfRy4TtJnyJ5w+9FU/glgpaSLya6ULiF74ms1rcB3JU0ne7DcFenx7Wal58+gzJosfQbVFRHPNrstZmXiW3xmZlZKvoIyM7NS8hWUmZmVkgPKzMxKyQFlZmal5IAyM7NSckCZmVkp/X/aSIidJzbOAAAAAUlEQVQtbechnQAAAABJRU5ErkJggg==\n", "text/plain": [ - "" + "
    " ] }, - "metadata": {}, + "metadata": { + "needs_background": "light" + }, "output_type": "display_data" } ], @@ -289,18 +284,20 @@ "name": "stdout", "output_type": "stream", "text": [ - "Intercept: 0.24\n", - "Slope: 0.82\n" + "Intercept: 0.82\n", + "Slope: 0.24\n" ] }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfcAAAFkCAYAAAA9h3LKAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAPYQAAD2EBqD+naQAAIABJREFUeJzt3Xl0VeW9//H3I3VWKPXWoWq9XglcUJc10YpXkWtNQXEq\n1qsGpFpFBSekDpU619r6q7XW2mqdllZT49RaqwWxaJ2oqBBURGgSrEqdBQkWRCE8vz+eVAVROclJ\nds4+79daWSabM3zdkHzy3fsZQowRSZKUH2tkXYAkSSouw12SpJwx3CVJyhnDXZKknDHcJUnKGcNd\nkqScMdwlScoZw12SpJwx3CVJyhnDXZKknCk43EMIXwkh3BxCeDuEsDiE8EwIobIjipMkSYX7QiEP\nDiF8EZgMPAAMBt4GKoB3il+aJElqi1DIxjEhhIuBXWOMAzuuJEmS1B6FXpbfH5gaQrg9hPBGCKE+\nhDCyIwqTJEltU2jn/h4QgUuBO4GvA5cDx8UYb17F4zciXb5/EVhShHolSSoX6wD/CUyMMc4r5ImF\nhvv7wJMxxgEfO3Y5sFOMcbdVPH4Y8LtCCpIkSSsYHmO8pZAnFDSgDngNmLXSsVnAQZ/y+BcBamtr\n6du3b4FvlT9jx47lsssuy7qMzHkePuK5SDwPH/FcJJ4HmDVrFocffji0ZmkhCg33yUCflY71AV76\nlMcvAejbty+Vlc6W69Gjh+cBz8PHeS4Sz8NHPBeJ52EFBd/WLnRA3WVA/xDCuBDCNq2X3UcCvyr0\njSVJUscoKNxjjFOBoUANMAM4CxgTY7y1A2qTJEltUOhleWKM44HxHVCLJEkqAteW70Q1NTVZl9Al\neB4+4rlIPA8f8Vwknof2KWgqXMEvntacnzZt2jQHRkiSVID6+nqqqqoAqmKM9YU8185dkqScMdwl\nScoZw12SpJwx3CVJyhnDXZKknDHcJUnKGcNdkqScMdwlScoZw12SpJwx3CVJyhnDXZKknDHcJUnK\nGcNdkqScMdwlScoZw12SpJwx3CVJyhnDXZKknDHcJUnKGcNdkqScMdwlScoZw12SpJwx3CVJyhnD\nXZKknDHcJUnKGcNdkqScMdwlScoZw12SpJwx3CVJyhnDXZKknDHcJUnKGcNdkqScMdwlScoZw12S\npJwx3CVJyhnDXZKknDHcJUldT4xZV1DSDHdJUtfx7rswZgyMG5d1JSXNcJckdQ333gvbbgvXXQeb\nbpp1NSXNcJckZev11+HQQ2H//aFfP3juOTjllKyrKmkFhXsI4bwQwvKVPp7vqOIkSTkWY+rS+/aF\nBx+E2lqYMAG23jrrykreF9rwnOeAvYDQ+vWy4pUjSSoLf/87HHccPPwwHHEEXHopbLQRAA0NDcyZ\nM4devXpRUVGRcaGlqS2X5ZfFGN+KMb7Z+jG/6FVJkvLpgw/gRz+CHXaAuXPhL3+BG2+EjTZi/vz5\n7L33vvTp04chQ4bQu3dv9t57X955552sqy45bQn3ihDCKyGEOSGE2hDClkWvSpKUP48/DpWVcP75\n6Z76jBlQXf3hHw8bNoJJk6YAtcDLQC2TJk2hpubwjAouXYWG+xTgSGAwMArYGngkhLB+keuSJOXF\nwoVw4omw226w3nowbRpcfHH6vFVDQwMTJ46npeWXwHBgS2A4LS2XM3HieBobG7OqviQVdM89xjjx\nY18+F0J4EngJOAS44dOeN3bsWHr06LHCsZqaGmpqagp5e0lSqfnTn+D442HBAvj5z+Gkk6Bbt088\nbM6cOa2f7bHSnwwEoKmpKdf33+vq6qirq1vhWHNzc5tfry0D6j4UY2wOITQAvT7rcZdddhmVlZXt\neStJUil57bUU5L//PQwZAldeCVtt9akP32abbVo/e4TUuf/bwwD06vWZMVPyVtXw1tfXU1VV1abX\na9c89xDCBqRgf609ryNJyonly+Gaa9L0tkcfhVtvTYvTfEawA/Tu3ZvBg4fQrdvJpHvuc4FaunUb\nw+DBQ3LdtXeEQue5XxJC2COEsFUI4X+Au4ClQN3nPFWSlHezZ8P//m+a4vbtb8OsWWlxmhA+96kA\ndXW1VFf3B0YAXwVGUF3dn7q62g4sOp8KvSy/BXALsBHwFvAY0D/GOK/YhUmSSsT776cBcj/+cerQ\nH3wQ9tyz4Jfp2bMn9933ZxobG2lqanKeezsUOqDOEXCSpI9MngzHHAONjXDGGXD22bDuuu16yYqK\nCkO9nVxbXpJUuOZmGD0adt8duneH+nq46KJ2B7uKo12j5SVJZeiuu9K89YUL4YorUsivYnqbsmPn\nLklaPa+8AkOHwkEHQVUVPP98CnmDvcsx3CVJn2358jRPvW9fmDIF7rgD7r4btnT18a7KcJckfbqZ\nM2HAADjhBDjssNStH3zwak9vUzYMd0nSJy1ZAueeCzvuCPPmpa1Zr7kGevbMujKtBgfUSZJW9Mgj\ncOyx8MILMG5c+lhnnayrUgHs3CVJyYIFKdQHDoSNNoLp0+GCCwz2EmTnLknlLka48044+WRYtCgN\nnjvuOFjD/q9U+TcnSeVs7lw48EA45BDo3z+tBz96tMFe4vzbk6Ry1NKSFqDp1w+mTk1bs951F2y+\nedaVqQgMd0kqNzNmwG67pcvwhx+euvWDDsq6KhWR4S5J5WLJEjjrLKisTEvHPvooXHUV9OiRdWUq\nMgfUSVI5+Otf0yC5l15KO7edeSasvXbWVamD2LlLUp7Nnw9HHw3f+AZssgk8/TScd57BnnN27pKU\nRzHCbbfBmDHpcvxvfpP2XXcUfFnwb1mS8ubll2H//aGmJq0LP2uW89bLjH/TkpQXLS1w+eVpetvT\nT8Mf/5gWp/nKV7KuTJ3McJekPHjmGdh1Vxg7Fo48Mu3eduCBWVeljBjuklTK3nsvbexSVQWLF8Pk\nyfCrX0H37llXpgw5oE6SStUDD6R76XPnwvnnwxlnwFprZV2VugA7d0kqNfPmpUvv1dWwxRbw7LNp\n7rrBrlZ27pJUKmKEujo45RRYuhSuvRaOOspR8PoE/0VIUil48UUYMgSGD4c990zT20aONNi1Sv6r\nkKSubNkyuPRS2HZbmDkT7rknLU6z6aZZV6YuzHCXpK5q+vS0x/rpp6cufeZM2G+/rKtSCTDcJamr\nWbw4BfrOO8MHH8CUKWlxmg03zLoylQgH1ElSV3L//TBqFLz6Klx4
IZx2Gqy5ZtZVqcTYuUtSV/DW\nWzBiBAweDFtvDTNmpMVpDHa1gZ27JGUpRrj5Zvje92D5crjhBjjiCAgh68pUwuzcJSkrL7yQOvUj\njoBBg2D27LQ4jcGudjLcJamzLVsGP/0pbLcdNDTA+PFwyy2w8cZZV6acMNwlqTNNnZpGwY8blwbO\nPfcc7LNP1lUpZwx3SeoM//pXuq++yy7pPvuUKfDzn8MGG2RdmXLIAXWS1NEmTIDRo+GNN+AnP0l7\nrjsKXh3Izl2SOsqbb8KwYWlN+IqKdAn+jDMMdnU4O3dJKrYY4cYb4dRT08Yuv/1tmsPuKHh1Ejt3\nSSqmpqa0z/pRR8G++6bd277zHYNdncpwl6RiWLoULr4Ytt8+zV+/7760OM2Xv5x1ZSpDhrsktdeT\nT8JOO8FZZ8GJJ6Z764MHZ12Vyli7wj2EcGYIYXkI4efFKkiSSsa778Ipp6RtWb/wBXjqKbjkElh/\n/awrU5lr84C6EMLOwLHAM8UrR5JKxJ//nKa3zZuXAn3MmBTwUhfQps49hLABUAuMBBYUtSJJ+hwN\nDQ1MmDCBxsbGzn/z11+HQw+F/faDfv3SJfhTTzXY1aW09bL8r4F7YowPFrMYSfos8+fPZ++996VP\nnz4MGTKE3r17s/fe+/LOO+90/JvHCNdfD337woMPQm1tWpxm6607/r2lAhUc7iGEw4CvAeOKX44k\nfbphw0YwadIU0oXDl4FaJk2aQk3N4R37xg0NsOeeMHIkHHhg2r1t+HCnt6nLKug6UghhC+AXQHWM\ncWnHlCRJn9TQ0MDEieNJwT689ehwWloiEyeOoLGxkYqKiuK+6QcfpPvpF14Im28Of/lLmsMudXGF\n3iSqAr4M1Ifw4a+s3YA9QggnAmvHGOPKTxo7diw9evRY4VhNTQ01NTVtKFlSOZozZ07rZ3us9CcD\nAWhqaipuuE+ZAscckxahOe00OPdcWG+94r2+9DF1dXXU1dWtcKy5ubnNrxdWkcWf/uAQ1ge2Wunw\njcAs4OIY46yVHl8JTJs2bRqVlZVtLlKSGhoa6NOnDyt27rR+PYKGhobihPvChfCDH8CVV6a569de\nCzvs0P7XlQpUX19PVVUVQFWMsb6Q5xbUuccYFwHPf/xYCGERMG/lYJekYurduzeDBw9h0qSTaWmJ\npI79Ybp1G0N19ZDiBPuf/gTHHw8LFqTtWE86Cbp1a//rSp2sGCvUrX7rL0ntUFdXS3V1f2AE8FVg\nBNXV/amrq23fC7/2Ghx8cBost8MOMHNmWpzGYFeJavfEzBjjN4pRiCR9np49e3LffX+msbGRpqYm\nevXq1b6OfflyuO66tA3r2mvDrbfCIYc4Cl4lz1UXJJWcioqK9l+Gnz0bjj0WHn007eB2ySXwpS8V\np0ApY24cI6m8vP8+XHBBuvz++utpQZrrrzfYlSt27pLKx+TJaXpbY2O6FH/22bDuullXJRWdnbuk\n/GtuTpu87L47dO8O9fVw0UUGu3LLzl1Svt11V9pjfeFC+OUv01Q3R8Er5+zcJeXTK6/A0KFw0EFQ\nWQnPP++8dZUNw11SvixfnlaX69sXHn8cbr89LU6z5ZZZVyZ1GsNdUn7MnAkDBsAJJ8Bhh6V14f/v\n/5y3rrJjuEsqfUuWpI1ddtwR3n4bHnoIrrkGevbMujIpEw6ok1TaHn00TW974QU488y06cs662Rd\nlZQpO3dJpWnBAjjuONhjj7QAzfTp8MMfGuwSdu6SSk2M8Pvfp5HvixbBr38No0bBGvYq0r/53SCp\ndMydC9/6Vhokt8suaXrb8ccb7NJK/I6Q1PW1tMCvfgX9+sFTT6XO/Y9/hC22yLoyqUsy3CV1bTNm\npGVjTzoJDj88TW876KCsq5K6NMNdUte0ZEna2KWyMq0N/+ijcNVV0KNH1pVJXZ4D6iR1PQ89lPZa\nf+mlFPBnnglrr511VVLJsHOX1HXMnw8jR8Kee8Imm8DTT8N55xnsUoHs3CVlL8a0BvzJJ6fL8b/5\nTVqYxlHwUpv4nSMpWy+/DPvvn9aCHzAgDZg77jiDXWoHv3skZaOlBS6/PE1ve/rpNLXtzjvhK1/J\nujKp5BnukjrfM8/ArrvC2LFw5JFpMZoDD8y6Kik3DHdJnee992DcOKiqgsWLYfLktDhN9+5ZVybl\nigPqJHWOBx5I99LnzoXzz4czzoC11sq6KimX7Nwldax589Kl9+rqtFzss8+muesGu9Rh7NwldYwY\noa4OTjkFli6Fa6+Fo45yFLzUCfwuk1R8L74IQ4bA8OFpQZpZs9LiNAa71Cn8TpNUPMuWwaWXwrbb\nwsyZcM89cNttsOmmWVcmlRXDXVJxTJ8O/fvD6aenLn3mTNhvv6yrksqS4S6pfRYvToG+887wwQcw\nZUpanGbDDbOuTCpbDqiT1Hb33w+jRsGrr8KFF8Jpp8Gaa2ZdlVT27NwlFe6tt2DECBg8GLbeGmbM\nSIvTGOxSl2DnLmn1xQg33wzf+x4sXw433ABHHAEhZF2ZpI+xc5e0el54IXXqRxwBgwbB7NlpcRqD\nXepyDHdJn23ZMvjpT2G77aChAcaPh1tugY03zroySZ/CcJf06aZOTaPgx42D0aPhuedgn32yrkrS\n5zDcJX3Sv/6V7qvvskv6+okn0uI0G2yQbV2SVosD6iStaMKE1KW/8Qb85Cdpz3VHwUslxc5dUvLm\nmzBsWFoTvqIiXYI/4wyDXSpBdu5SuYsRbrwRTj01bexy001w+OGOgpdKmJ27VM6amtI+60cdldaB\nnzUrLU5jsEslraBwDyGMCiE8E0Jobv34Wwhh744qTlIHWbo03U/ffnv4xz9g4sTUsX/5y1lXJqkI\nCu3c5wLfByqBKuBB4O4QQt9iFyapgzzxBFRVwTnnwEknpaVjBw36xMMaGhqYMGECjY2NGRQpqT0K\nCvcY459jjPfFGOfEGJtijGcD/wL6d0x5korm3XdhzBjYddc0SO6pp9LiNOuvv8LD5s+fz95770uf\nPn0YMmQIvXv3Zu+99+Wdd97JqHBJhWrzPfcQwhohhMOA9YDHi1eSpKK7917Ydlu47jr42c9S977j\njqt86LBhI5g0aQpQC7wM1DJp0hRqag7vzIoltUPBo+VDCNuRwnwd4F1gaIxxdrELk1QEr7+euvXb\nb0/rwl91VdrF7VM0NDQwceJ4UrAPbz06nJaWyMSJI2hsbKSioqIzKpfUDm2ZCjcb2AHoARwM3BRC\n2OOzAn7s2LH06NFjhWM1NTXU1NS04e0lfa4Y4frr4fTT0yX43/0Oamo+dxT8nDlzWj/bY6U/GQhA\nU1OT4S51gLq6Ourq6lY41tzc3ObXCzHGdhUUQvgL0BRjHL2KP6sEpk2bNo3Kysp2vY+k1fT3v8Nx\nx8HDD6dd2372M9hoo9V6akNDA3369GHFzp3Wr0fQ0NBguEudpL6+nqqqKoCqGGN9Ic8txjz3NYC1\ni/A6ktrjgw/gRz+CHXa
Af/4TJk1K+62vZrAD9O7dm8GDh9Ct28mkQJ8L1NKt2xgGDx5isEslotB5\n7j8OIQwIIWwVQtguhPAT0vW62o4pT9JqefxxqKyE889Pa8HPmAF77dWml6qrq6W6uj8wAvgqMILq\n6v7U1fltLpWKQu+5bwz8FtgMaAaeBQbFGB8sdmGSVsPChfCDH8CVV8JOO8G0aalzb4eePXty331/\nprGxkaamJnr16mXHLpWYgsI9xjiyowqRVKC774YTToAFC+Cyy+DEE6Fbt6K9fEVFhaEulSjXlpdK\nzauvwsEHw7e+lbr0mTPTdLciBruk0ma4S6Vi+XK4+mro1w8efRRuvTUtTrPVVllXJqmLMdylUjBr\nFgwcCKNGwbe/nb4+9FB3b5O0Soa71JW9/z5ccAF87Wvwxhvw4INpcZovfSnryiR1YW1ZoU5SZ3js\nMTj2WGhshDPOgLPPhnXXzboqSSXAzl3qapqbYfRoGDAAuneH+nq46CKDXdJqs3OXupI//CFNaXv3\nXbjiihTyjoKXVCA7d6kreOUVGDo0DZbbaSd4/vmiz1uXVD4MdylLy5en1eX69oUpU+COO9LiNFtu\nmXVlkkqY4S5lZebMdF/9hBPgsMNSt37wwU5vk9RuhrvU2ZYsgXPPhR13hHnz0tas11wDPXtmXZmk\nnHBAndSZHnkkTW974QUYNy59rLNO1lVJyhk7d6kzLFiQQn3gwLS/+vTpaXEag11SB7BzlzpSjHDn\nnXDyybBoURo8d9xxsIa/V0vqOP6EkTrK3Llw4IFwyCHQv39aD370aINdUofzp4xUbC0taQGafv1g\n6tS0MM1dd8Hmm2ddmaQyYbhLxTRjBuy2W7oMP2JE6taHDs26KkllxnCXimHJEjjrLKishIUL06Yv\nV14JPXpkXZmkMuSAOqm9/vrXNEjupZfgnHPg+9+HtdfOuipJZczOXWqr+fPh6KPhG9+ATTaBp59O\ni9MY7JIyZucuFSpGuO02GDMG3n8frr4aRo50FLykLsOfRlIhXnoJ9tsPampgjz3SgLljjzXYJXUp\n/kSSVkdLC/ziF7DttvDMM2nntjvugM02y7oySfoEw136PM88kxah+d734LvfTbu3HXBA1lVJ0qcy\n3KVP8957cOaZUFWVPp88OS1O07171pVJ0mdyQJ20KpMmwahR8M9/pg1eTj8d1lor66okabXYuUsf\nN28eHHkkfPObsMUW8OyzaXEag11SCbFzlyBNb7vlFjjlFFi2DK67Do46CkLIujJJKpidu/SPf8A+\n+8Dhh8Nee6XpbUcfbbBLKlmGu8rXsmVw6aWw3XZpBPy998Ktt8Kmm2ZdmSS1i+Gu8lRfD7vskgbK\njRwJM2fCvvtmXZUkFYXhrvKyaFEK9K9/HZYuhSlT4PLLYcMNs65MkorGAXUqH/ffn6a3vfoqXHgh\nnHYarLlm1lVJUtHZuSv/3noLRoyAwYNh661hxgwYN85gl5Rbdu7Krxjh5pvTsrHLl8MNN8ARRzgK\nXlLu2bkrn+bMgUGDUpgPGgSzZ6fFaQx2SWXAcFe+LFsGP/0pbL89NDbC+PFpcZqNN866MknqNIa7\n8mPqVNh553Q/ffRoeO65tDiNJJUZw12l71//SvfVd9klff3EE2lxmg02yLYuScqIA+pU2iZMSF36\nm2/CxRenteEdBS+pzNm5qzS9+SYMGwZDhkBFRZredvrpBrskUWC4hxDGhRCeDCEsDCG8EUK4K4TQ\nu6OKkz4hxjSl7b//Oy1Kc9NN6b/bbJN1ZZLUZRTauQ8ArgB2AaqBNYH7QwjrFrswlZ+GhgYmTJhA\nY2Pjqh/Q1ATV1Wkr1v32S7u3jRjh9DZJWklB99xjjEM+/nUI4UjgTaAKeKx4ZamczJ8/n2HDRjBx\n4vgPjw0ePIS6ulp69uyZ1oD/2c/ghz+EzTaDiRPT3HVJ0iq19577F4EIzC9CLSpTw4aNYNKkKUAt\n8DJQy6RJU6ipOTyNfK+qgnPOgZNOSvfWDXZJ+kxtHi0fQgjAL4DHYozPF68klZOGhobWjr0WGN56\ndDjrtixmn4nHEu+fQNhxR3jqKdhxxwwrlaTS0Z6pcFcC/YDdPu+BY8eOpUePHiscq6mpoaamph1v\nrzyYM2dO62d7fHhsX+7lSs5nI2D20UfT96qr4AvO2pSUX3V1ddTV1a1wrLm5uc2vF2KMhT8phF8B\n+wMDYowvf8bjKoFp06ZNo7Kyss1FKr8aGhro06cPUMsm7MXljOFQbuc+tmc0M7i/oYGKioqsy5Sk\nTldfX09VVRVAVYyxvpDnFnzPvTXYDwT2/Kxgl1ZH7969GTxoH44JxzKLbdiTBxjGaPZb45/0GTzE\nYJekNijoWmcI4UqgBjgAWBRC2KT1j5pjjEuKXZzKwN//zj2LFrJmXMwNwGksZj5XMfibabS8JKlw\nhd7IHEUaHf/QSse/C9xUjIJUJj74IO3e9qMfseYWW8CkSez+1a9S29REr1697NglqR0KnefucrVq\nv8cfh2OOSXusn346nHsurLsuFWCoS1IRGNbqPAsXwoknwm67wXrrwbRp8JOfwLoucChJxeT8InWO\nu++GE06ABQvgsstSyHfrlnVVkpRLdu7qWK++CgcfDN/6FuywA8ycCWPGGOyS1IEMd3WM5cvh6quh\nXz949FG49Va4917YaqusK5Ok3DPcVXyzZsHAgTBqFHz72+nrQw919zZJ6iSGu4rn/ffhggvga1+D\nN96ABx+E66+HL30p68okqaw4oE7FMXlymt7W2AhnnAFnn+0oeEnKiJ272qe5GUaPht13h+7dob4e\nLrrIYJekDNm5q+3uuitNaVu4EK64IoW8o+AlKXN27ircK6/A0KFw0EFQVQXPP++8dUnqQgx3rb7l\ny+HKK6FvX5gyBe64Iy1Os+WWWVcmSfoYw12rZ+ZMGDAgrTJ32GGpWz/4YKe3SVIXZLjrsy1ZkjZ2\n2XFHmDcPHn4YrrkGevbMujJJ0qdwQJ0+3SOPwLHHwgsvwLhx6WOddbKuSpL0Oezc9UkLFqRQHzgQ\nNtoIpk9Pi9MY7JJUEuzc9ZEY4c474eSTYdGiNHjuuONgDX8HlKRS4k9tJXPnwoEHwiGHQP/+aT34\n0aMNdkkqQf7kLnctLWkBmn79YOpU+MMf0uI0m2+edWWSpDYy3MvZjBmw227pMvyIEalbHzo066ok\nSe1kuJejJUvgrLOgsjItHfvYY+n+eo8eWVcmSSoCB9SVm4ceSiPhX3oJzjkHvv99WHvtrKuSJBWR\nnXu5mD8fRo6EPfeETTaBp59Oi9MY7JKUO3bueRcj3H57uq/+/vtw9dUp5B0FL0m55U/4PHv5Zdh/\n/7QW/B57pAFzxx5rsEtSzvlTPo9aWuDyy9P0tqefTju33XEHbLZZ1pVJkjqB4Z43zzwDu+4KY8fC\nd7+bdm874ICsq5IkdSLDPS/eey9t7FJVBYsXw+TJaXGa7t2zrkyS1MkcUJcHDzyQ
1oCfOxfOPx/O\nOAPWWivrqiRJGbFzL2Xz5sGRR0J1NWyxBTz7LJx9tsEuSWXOzr0UxQh1dXDKKbB0KVx7LRx1lKPg\nJUmAnXvpefFFGDIEhg9PC9LMmuW8dUnSCkyEUrFsGVx6KWy7LcycCffcA7fdBptumnVlkqQuxnAv\nBdOnpz3WTz89dekzZ8J++2VdlSSpizLcu7LFi1Og77wzfPABTJmSFqfZcMOsK5MkdWEOqOuq/vKX\nNL3t1VfhwgvhtNNgzTWzrkqSVALs3Luat9+G73wHBg2CrbeGGTPS4jQGuyRpNdm5dxUxQm1tWjZ2\n+XK44QY44ggIIevKJEklxs69K3jhBRg8+KOOffbstDiNwS5JagPDPUvLlsEll8B220FDA4wfD7fc\nAhtvnHVlkqQSZrhnZdq0NAr+zDNh9Gh47jnYZ5+sq5Ik5UDB4R5CGBBC+FMI4ZUQwvIQgvuJFmLR\nIjj1VPj619PXTzyRFqfZYINs65Ik5UZbOvf1gaeB44FY3HJy7r770gpzV10FF18MTz4JO+2UdVWS\npJwpeLR8jPE+4D6AEBzxtVpaWlg4dCjd77mHxf/zP6z3wAOwzTZZVyVJyinvuXew+fPns/e+B3DN\nPffwHWD9v/2NvU84mXfeeSfr0iRJOWW4d7Bhw0YwadIUTqeWm3kZqGXSpCnU1ByedWmSpJxyEZsO\n1NDQwMSJ44FaYHjr0eG0tEQmThxBY2MjFRUVGVYoScqjTgn3sWPH0qNHjxWO1dTUUFNT0xlvn5k5\nc+a0frbHSn8yEICmpibDXZJEXV0ddXV1Kxxrbm5u8+t1SrhfdtllVFZWdsZbdSnbfDho7hE+6twB\nHgagV69enV2SJKkLWlXDW19fT1VVVZter+BwDyGsD/QC/j1S/r9CCDsA82OMc9tURU717t2bwYOH\nMGnSybRE0cGiAAAHHUlEQVS0RFLH/jDduo2hunqIXbskqUO0ZUDdTsB0YBppnvulQD1wQRHryo26\nulqqq/sDI4CvAiOoru5PXV1txpVJkvKqLfPcH8ZR9qutZ8+e3Hffn2lsbKSpqYlevXrZsUuSOpSj\n5TtJRUWFoS5J6hR24JIk5YzhLklSzhjukiTljOEuSVLOGO6SJOWM4S5JUs4Y7pIk5YzhLklSzhju\nkiTljOEuSVLOGO6SJOWM4S5JUs4Y7pIk5YzhLklSzhjukiTljOEuSVLOGO6SJOWM4S5JUs4Y7pIk\n5YzhLklSzhjukiTljOEuSVLOGO6SJOWM4S5JUs4Y7pIk5YzhLklSzhjukiTljOEuSVLOGO6SJOWM\n4S5JUs4Y7pIk5YzhLklSzhjukiTljOEuSVLOGO6SJOWM4S5JUs4Y7pIk5YzhLklSzhjukiTljOHe\nierq6rIuoUvwPHzEc5F4Hj7iuUg8D+3TpnAPIZwQQvhHCOG9EMKUEMLOxS4sj/zHmngePuK5SDwP\nH/FcJJ6H9ik43EMIhwKXAucBOwLPABNDCP9R5NokSVIbtKVzHwtcHWO8KcY4GxgFLAaOKmplkiSp\nTQoK9xDCmkAV8MC/j8UYIzAJ2LW4pUmSpLb4QoGP/w+gG/DGSsffAPqs4vHrAMyaNavwynKoubmZ\n+vr6rMvInOfhI56LxPPwEc9F4nlYITvXKfS5ITXeq/ngEDYDXgF2jTE+8bHj/w/YI8a460qPHwb8\nrtCiJEnSh4bHGG8p5AmFdu5vAy3AJisd3wR4fRWPnwgMB14ElhT4XpIklbN1gP8kZWlBCurcAUII\nU4AnYoxjWr8OwMvAL2OMlxRagCRJKq5CO3eAnwM3hhCmAU+SRs+vB9xYxLokSVIbFRzuMcbbW+e0\n/5B0Of5pYHCM8a1iFydJkgpX8GV5SZLUtbm2vCRJOWO4S5KUMx0a7m4wAyGEASGEP4UQXgkhLA8h\nHJB1TVkIIYwLITwZQlgYQngjhHBXCKF31nV1thDCqBDCMyGE5taPv4UQ9s66rqyFEM5s/f74eda1\ndLYQwnmt/+8f/3g+67qyEkL4Sgjh5hDC2yGExa3fL5VZ19WZWnNz5X8Ty0MIV6zua3RYuLvBzIfW\nJw06PB4o5wEOA4ArgF2AamBN4P4QwrqZVtX55gLfBypJSzk/CNwdQuibaVUZav2l/1jSz4hy9Rxp\ngPKmrR+7Z1tONkIIXwQmA+8Dg4G+wKnAO1nWlYGd+OjfwqbAN0n5cfvqvkCHDaj7lPnwc0nz4X/a\nIW/axYUQlgPfijH+Ketastb6S96bpJUNH8u6niyFEOYBp8UYb8i6ls4WQtgAmAaMBs4BpscYv5dt\nVZ0rhHAecGCMsay601UJIVxMWgF1YNa1dCUhhF8AQ2KMq321s0M6dzeY0Wr4Iuk30flZF5KVEMIa\nIYTDSOtEPJ51PRn5NXBPjPHBrAvJWEXrrbs5IYTaEMKWWReUkf2BqSGE21tv39WHEEZmXVSWWvN0\nOHB9Ic/rqMvyn7XBzKYd9J4qEa1XcX4BPBZjLLt7iyGE7UII75IuPV4JDG3dPrmstP5i8zVgXNa1\nZGwKcCTpMvQoYGvgkRDC+lkWlZH/Il3F+TswCLgK+GUIYUSmVWVrKNAD+G0hT2rLCnVSe10J9AN2\ny7qQjMwGdiB9wx4M3BRC2KOcAj6EsAXpF7zqGOPSrOvJUozx4+uGPxdCeBJ4CTgEKLdbNWsAT8YY\nz2n9+pkQwnakX3puzq6sTB0FTIgxrmr/lk/VUZ17oRvMqEyEEH4FDAH+N8b4Wtb1ZCHGuCzG+EKM\ncXqM8SzSQLIxWdfVyaqALwP1IYSlIYSlwEBgTAjhg9arO2UpxtgMNAC9sq4lA68BK+8RPgv4aga1\nZC6E8FXSAORrC31uh4R762/i04C9/n2s9Zt1L+BvHfGe6vpag/1AYM8Y48tZ19OFrAGsnXURnWwS\nsD3psvwOrR9TgVpgh1jGS2e2DjLsRQq6cjMZ6LPSsT6kKxnl6CjS7ezxhT6xIy/Lu8EM0HrfrBfw\n707kv0IIOwDzY4xzs6usc4UQrgRqgAOARSGEf1/VaY4xls12wCGEHwMTSDspbkgaKDOQdH+xbMQY\nFwErjLcIISwC5sUYV+7cci2EcAlwDynANgcuAJYCdVnWlZHLgMkhhHGkaV+7ACOBYzKtKgOtDfGR\nwI0xxuWFPr/Dwt0NZj60E/BX0sjwSJr7D2lwxFFZFZWBUaT//4dWOv5d4KZOryY7G5P+7jcDmoFn\ngUGOFgfKdx2ILYBbgI2At4DHgP4xxnmZVpWBGOPUEMJQ4GLS1Mh/AGNijLdmW1kmqoEtaeO4CzeO\nkSQpZ1xbXpKknDHcJUnKGcNdkqScMdwlScoZw12SpJwx3CVJyhnDXZKknDHcJUnKGcNdkqScMdwl\nScoZw12SpJz5/7mTdL4Po2m9AAAAAElFTkSuQmCC\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAW4AAAD8CAYAAABXe05zAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4xLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvDW2N/gAAGxJJREFUeJzt3X2QlfV5//H3hREtocWh0oZ2gzsZ/P3Cgwi4Ai2QwWAlpJLpdEhHsk3EBx5CSswEcapgiygC9QkpLmQBI4GDCslumLDgAArLr7MruNBdtkAKHYXVrj956FB5GgK73/5xLTVGZM/C3uc+9zmf1wxz9tycPec64/iZi+/9fbAQAiIikhwd4i5ARETaRsEtIpIwCm4RkYRRcIuIJIyCW0QkYRTcIiIJo+AWEUkYBbeISMIouEVEEuYLUbzpjTfeGAoLC6N4axGRnLRr165jIYRu6bw2kuAuLCykpqYmircWEclJZnY43ddqqEREJGEU3CIiCaPgFhFJGAW3iEjCKLhFRBJGwS0ikjAKbhGRhFFwi4hcrY8/hmnT4MMPM/JxCm4Rkauxfj306QMvvACbN2fkIxXcIiJX4qOP4J57YMwYuOEGqK6G730vIx+t4BYRaYsQ4JVXoFcvKC+HJ5+EXbtg8OCMlZDWXiVmdgg4CTQBF0IIRVEWJSKSld59FyZNgi1bYNgwWLoUvvrVjJfRlo77jhBCf4W2iOSdCxfgueegb1/YsQMWL4bKyv8N7VQKCguhQwd/TKWiLSeS3QFFRHJGbS08+KAPh3zrW/DSS1BQ8L9/nUrBxIlw5ow/P3zYnwMUF0dTUroddwA2mdkuM5sYTSkiIlnk7Fl49FEoKoIPPoA1a+CXv/xUaAPMmPFJaF905oxfj0q6HffQEEKjmf0RsNnMfh1C2P7bL2gJ9IkAPXr0aOcyRUQyaNs2mDAB/uM/4P774ZlnoGvXS760oeHSb/F519tDWh13CKGx5fEIUA4MusRrSkMIRSGEom7d0jrEQUQku5w44YF9xx3Q3Ow3IZcv/9zQBvi8PjXK/rXV4DazL5rZ71/8GbgL+LfoShIRiUFZmU/x++lP4ZFHoL4eRo5s9dfmzIFOnT59rVMnvx6VdIZK/hgoN7OLr18dQngjupJERDKosRH+7u98TvaAAVBRAQMHpv3rF29AzpjhwyM9enhoR3VjEsBCCO3+pkVFRUFnTopIVmtuhmXLYPp0+M1v4Ikn4Mc/hi/EM9nOzHalO91a0wFFJP/8+7/7nL3t2308u7QUevaMu6q0acm7iOSP8+fh6afh1lthzx6/8fjmm4kKbVDHLSL54p13fCHNnj3w7W/DwoXwpS/FXdUVUcctIrnt9GnfK3vIEDh2zBfRrFmT2NAGddwikss2bfJNoQ4dgu9/H+bOhS5d4q7qqqnjFpHcc/w43HsvjBoF113nNyFLSnIitEHBLSK5JAR49VVfSLN6Ncyc6ZtEDR8ed2XtSkMlIpIbGhp8OGTDBhg0yGeL3HJL3FVFQh23iCRbUxP88z/7uY/btsGCBVBVlbOhDeq4RSTJ9u71KX5vv+3j2UuW+EkGOU4dt4gkz7lzMGuW7y1y8CCsWgUbN+ZFaIM6bhFJmqoq77L37/ednF54AfJsK2l13CKSDB9/7Lv4DRvmi2o2bPBOO89CGxTcIpIE69f7zceSEvjhD31se/TouKuKjYJbRLLXRx/BPffAmDFwww1QXe2zRjp3jruyWCm4RST7hACvvOILacrL4ckn/ZT1wYPjriwr6OakiGSXd9/1/UW2bPHx7KVL4atfjbuqrKKOW0Syw4UL8Nxz0Lcv7Njh49mVlQrtS1DHLSLxq631KX67dvl4dkkJFBTEXVXWUsctIvE5exYefRSKiuD9932f7HXrFNqtUMctIvHYts3PfTx4EO6/H555Brp2jbuqRFDHLSKZdeIETJjgh/Q2NflNyOXLFdptoOAWkcwpK/Mpfi+/DNOnQ309jBwZd1WJo6ESEYleY6MvVy8vh/79oaICBg6Mu6rEUsctItFpbobSUu+yN26E+fNh506F9lVSxy0i0ThwwG8+Vlb6eHZpKfTsGXdVOUEdt4i0r/Pn4emnoV8/qKvzG49vvqnQbkfquEWk/bzzji+k2bMHvv1tWLgQvvSluKvKOeq4ReTqnT4N06bBkCFw7Bj88pe+mEahHQl13CJydTZt8k2hDh2CyZNh3jzo0iXuqnKaOm4RuTLHj8O99/ohvdddB9u3w+LFCu0MUHCLSNuEAK++6lP8Vq+GmTN9k6jhw+OuLG9oqERE0tfQAN//vp/3OGiQzxa55Za4q8o76rhFpHVNTbBokZ/7uG2bn6xeVaXQjok6bhG5vL17fVOo6mofz16yBAoL464qr6njFpFLO3cOZs2CAQN8FeTKlb5sXaEdO3XcIvJZVVW+kGb/figu9qGRbt3irkpaqOMWkU98/LHv4jdsmC+q2bABVq1SaGcZBbeIuPXr/eZjSQlMnepj26NHx12VXELawW1m15jZv5rZ+igLEslnqZQPIXfo4I+pVAY+9MgRGDfOD+nt0sWHSV58ETp3zsCHy5VoS8f9ELA/qkJE8l0q5bugHj7sa1wOH/bnkYV3CLBihS+kKSuD2bNh927fb0SyWlrBbWYFwF8Cy6ItRyR/zZgBZ858+tqZM3693b37Ltx1F4wf78FdWwuPPw4dO0bwYdLe0u24FwCPAM2f9wIzm2hmNWZWc/To0XYpTiSfNDS07foVuXABnnsO+vaFHTt8PHv7dg9vSYxWg9vM7gaOhBB2Xe51IYTSEEJRCKGom+5Ai7RZjx5tu95mtbU+DPLww3DnnbBvny9f76A5CkmTzn+xocC3zOwQ8BrwdTNbFWlVInlozhzo1OnT1zp18utX5exZePRRKCqC99+H11+HdeugoOAq31ji0mpwhxAeDSEUhBAKgXuAt0IIfxt5ZSJ5prjYj2W86SYw88fSUr9+xSor4dZbfY/s733PF9T8zd/4B0hiaeWkSBYpLr7KoL7oxAl45BFYuhS+8hXYsgVGjmyHN5Zs0KbBrRDCthDC3VEVIyLtoKzMbzYuXw7Tp0N9vUI7x6jjFskVjY2+XL28HPr395WQt90Wd1USAd1OFkm65mYfDO/d23fvmz8fdu5UaOcwddwiSXbggC+vrKyEESM8wG++Oe6qJGLquEWS6Px5ePpp6NcP6upg2TJ46y2Fdp5Qxy2SNO+843tl79kDY8fCwoXQvXvcVUkGqeMWSYrTp2HaNF/9eOyY34Rcu1ahnYfUcYskwaZNMGkSHDoEkyf7gpouXeKuSmKijlskmx0/Dvfe64f0duzoG0ItXqzQznMKbpFsFAK8+qovpFm92vd2rauD4cPjrkyygIZKRLJNQwNMmQIVFXD77b5cvV+/uKuSLKKOWyRbNDXBokV+7uPWrX6yenW1Qls+Qx23SDbYuxcmTPCgvusu+MlP/NBJkUtQxy0Sp3PnYNYsGDDAV0GuXAlvvKHQlstSxy0Sl6oqX0izfz985zuwYAHo9C
hJgzpukUw7edJ38Rs2zBfVbNjgR7krtCVNCm6RTFq/3nfxKymBqVN9bHv06LirkoRRcItkwpEjMG4cjBnji2eqquDFF6Fz57grkwRScItEKQRYscIX0pSVwezZsHu37zcicoV0c1IkKu++6/uLbNkCQ4f6+Y+9esVdleQAddwi7e3CBXjuOejbF3bs8PHs7dsV2tJu1HGLtKe6Op/iV1Pj49klJVBQEHdVkmPUcYu0h7Nn4bHH/JzHhgZ4/XVYt06hLZFQxy1ytSorfbn6wYNw333w7LPQtWvcVUkOU8ctcqVOnPCDekeM8A2itmyBl19WaEvkFNwiV6KszBfSLF8O06dDfT2MHBl3VZInNFQi0haNjb5cvbwc+vf3lZADB8ZdleQZddwi6WhuhtJS77I3boT582HnToW2xEIdt0hrDhzwsezKSrjjDg/wnj3jrkrymDpukc9z/jzMnesn0NTV+Xj2m28qtCV26rhFLuWdd3whzZ49MHYsLFwI3bvHXZUIoI5b5NNOn4Zp03wTqGPH/Cbk2rUKbckq6rhFLtq0yTeFOnQIJk+GefN8C1aRLKOOW+T4cRg/HkaNguuu8w2hFi9WaEvWUnBL/goBXn3Vd+1LpWDmTKitheHD465M5LI0VCL5qaEBpkyBigq4/XZfrt6vX9xViaRFHbfkl6YmWLQI+vSBrVvhhRegulqhLYmijlvyx969votfdbWPZy9ZAoWFcVcl0mbquCX3nTsHs2bBgAG+CnLlSl+2rtCWhGq14zaz64HtwHUtr/95COEfoy5MpF1UVXmXvW8fFBf70Ei3bnFXJXJV0um4zwFfDyHcCvQHvmFmOqJastvJk76L37BhcOoUbNgAq1Z9JrRTKW+8O3Twx1QqlmpF2qTVjjuEEIBTLU+vbfkToixK5KpUVPgCmv/8T/jhD+Gpp6Bz58+8LJXyvaPOnPHnhw/7c/DmXCRbpTXGbWbXmFktcATYHELYEW1ZIlfgyBEYNw7uvhtuuMFvQi5YcMnQBpgx45PQvujMGb8uks3SCu4QQlMIoT9QAAwys76/+xozm2hmNWZWc/To0fauU+TzhQArVvhCmrIymD0bdu2CwYMv+2sNDW27LpIt2jSrJIRwAtgGfOMSf1caQigKIRR1080fyZT33vOpfePHe3DX1sLjj0PHjq3+ao8ebbsuki1aDW4z62ZmN7T8/HvAncCvoy5M5LIuXIDnn4e+feHtt6GkxPcY6dUr7beYMwc6dfr0tU6d/LpINktnAU53YIWZXYMH/ZoQwvpoyxK5jLo63yu7pgbGjPHQLiho89tcvAE5Y4YPj/To4aGtG5OS7dKZVbIHGJCBWkQu7+xZePJJ+Kd/gj/8Q1izxg85MLvitywuVlBL8mjJuyRDZaUvpDl4EO6/H555Brp2jbsqkVhoybtktxMnfHL1iBG+QdSWLX72o0Jb8piCW7JXWRn07u1BPX061NfDyJFxVyUSOw2VSPZpbPTl6uXl0L8/rF8PAwfGXZVI1lDHLdmjuRmWLvUue+NGmD8fdu5UaIv8DnXckh0OHPCx7MpKuOMOKC2Fnj3jrkokK6njlnidPw9z5/oJNHV1Pp795psKbZHLUMct8ampgQcegD17fD72woXQvXvcVYlkPXXcknmnT8O0ab4J1LFjfhNy7VqFtkia1HFLZm3aBJMmwaFDvmf2vHnQpUvcVYkkijpuyYzjx30Hv1Gj4LrrfEOoxYsV2iJXQMEt0QoBXnvNd+1LpWDmTN96dfjwuCsTSSwNlUh0GhpgyhQ/SmzQIJ8tcsstcVclknjquKX9NTfDokXQpw9s3eonq1dVKbRF2ok6bmlfe/f6Ln7V1T6evWSJH58uIu1GHbe0j3PnYNYsGDDAV0GuXOnL1hXaIu1OHbdcvaoq77L37fNTCV54AXTuqEhk1HHLlTt50nfxGzYMTp2CDRtg1SqFtkjEFNxyZSoqfBe/khKYOtXHtkePjrsqkbyg4Ja2OXIExo2Du+/2xTNVVfDii9C5c9yVieQNBbekJwRYscIX0pSVwezZsHs3DBkSd2UieUc3J6V1773n+4ts3gxDh/phB716xV2VSN5Sxy2f78IFeP556NsX3n7bx7O3b1doi8RMHbdcWl0dPPig75k9ZoyHdkFB3FWJCOq45XedPQuPPQa33eZ7jaxZA+vWKbRFsog6bvlEZaUvpDl4EO6/H555Brp2jbsqEfkd6rgFTpzwg3pHjICmJtiyxc9+VGiLZCUFd74rL/eFNMuXw/TpUF8PI0fGXZWIXIaGSvJVY6OveCwrg/79Yf16GDgw7qpEJA3quPNNc7PPw+7d2/cWmT8fdu5UaIskiDrufHLggI9lV1bCHXdAaSn07Bl3VSLSRuq488H58zB3LvTr5/Ozly/3Y8QU2iKJpI4719XUwAMPwJ49MHYsLFwI3bvHXZWIXAV13Lnq9GmYNg0GD4Zjx3z2yNq1Cm2RHKCOOxdt2uSbQh06BJMnw7x5vgWriOQEddy55PhxGD/eD+nt2NE3hFq8WKEtkmMU3LkgBHjtNd+1L5WCmTP9JuTw4XFXJiIR0FBJ0jU0wJQpfpTYoEE+W+SWW+KuSkQipI47qZqbYdEi6NMHtm71k9WrqhTaInmg1eA2sy+b2VYz229me83soUwUJpexd6+frD51qp9Is3cv/OhHcM01cVcmIhmQTsd9AZgWQugFDAF+YGa9oy0rv6VSUFgIHTr4YyrV8hfnzsGsWTBggK+CXLkSNm70F4lI3mh1jDuE8CHwYcvPJ81sP/CnwL6Ia8tLqZSvSj9zxp8fPuzPbzxYzai1D8K+fVBc7EMj3brFW6yIxKJNY9xmVggMAHZEUYzAjBmfhDZAZ04y78xU/uKJoXDqlG8MtWqVQlskj6Ud3GbWGfgF8KMQwseX+PuJZlZjZjVHjx5tzxrzSkPDJz9/kwr20Zsf8BKLmOpj2aNHx1eciGSFtILbzK7FQzsVQii71GtCCKUhhKIQQlE3dYNXrEcP6MYRVjOOCu7mv+nCn1PF8ze9CJ07x12eiGSBdGaVGLAc2B9CeD76kvJYCKwetYL99OKvKeMfeIKB7Ka+0xDmzIm7OBHJFul03EOB7wJfN7Palj/fjLiu/PPeezBqFH9eOp6m/9OLb/5JLU/ZP/AnN3WktNTvR4qIQHqzSv4FsAzUkp8uXPCtVh9/3Odhl5TwR5Mm8WYHrY0SkUvTkvc41dXBgw/6ntljxkBJCRQUxF2ViGQ5tXVxOHsWHnsMbrvNp5G8/jqsW6fQFpG0qOPOtMpKmDABDh6E++6DZ5+Frl3jrkpEEkQdd6acOOFLIEeMgKYm2LIFXn5ZoS0ibabgzoTycujd2w/pnT4d6uth5Mi4qxKRhNJQSZQaG30Hv7Iy6N8ffvUrH9cWEbkK6rij0NwMS5d6l71hA8yfDzt3KrRFpF2o425vBw74WHZlpY9nl5bCzTfHXZWI5BB13O3l/HmYOxf69fP52cuWwVtvKbRFpN2p424PNTXwwAOwZw+MHesrIbt3j7sqEclR6rivxunT8PDDMHgwH
Dvms0fWrlVoi0ik1HFfqc2bYdIk3xxq8mSYNw+6dIm7KhHJA+q42+r4cRg/Hu66C669FrZvh8WLFdoikjEK7nSFAK+9Br16+cGQM2b4Tcjhw+OuTETyjIZK0tHQAFOmQEUF3H67L1fv1y/uqkQkT6njvpzmZli0CPr0ga1b/WT16mqFtojESh3359m3z/fKrq728eyf/AQKC+OuSkREHfdnnDsHTzzhe4scOAArV8Ibbyi0RSRrqOP+bdXV3mXv2wff+Q4sWAA6sV5Esow6boCTJ30Xv6FD4dQp3xgqlVJoi0hWUnBXVPjNx5de8vDeuxdGj467KhGRz5W/QyVHjsBDD/nc7D59oKoKhgyJuyoRkVblX8cdAvzsZ76QpqwMZs+G3bsV2iKSGPnVcb/3nu8vsnmzj2cvXeoBLiKSIPnRcV+4AM8/D337wttvQ0mJ7zGi0BaRBMr9jruuzqf41dTAmDF+E/LLX467KhGRK5a7HffZs/DYY1BU5HuNvP46rFun0BaRxMvNjruyEiZMgIMH4b774NlnoWvXuKsSEWkXudVxnzjhNx9HjICmJr8J+fLLCm0RySm5E9zl5dC7tx/SO3061NfDnXfGXZWISLtL/lBJY6OveCwr842hfvUruO22uKsSEYlMcjvuEHwedu/evrfIvHmwc6dCW0RyXjI77gMHYOJEvwk5YgSUlsLNN8ddlYhIRiSr4z5/HubO9RNoamt9PPuttxTaIpJXktNx19T4Qpq6Ohg7FhYuhO7d465KRCTjsr/jPn0aHn4YBg+Go0d99sjatQptEclb2d1xb97s87Ivbg41fz506RJ3VSIiscrOjvv4cRg/3g/pvfZavwm5ZIlCW0SEbAvuEPxgg169/OiwGTN8TPtrX4u7MhGRrNFqcJvZy2Z2xMz+LcpCUimY0/U5GDeO2v8upGL2LnjqKbj++ig/VkQkcdIZ434FWAT8LKoiUimflt35zHf5iI689JsfcP1T11DaA4qLo/pUEZFkshBC6y8yKwTWhxD6pvOmRUVFoaamJu0iCgvh8OHPXr/pJjh0KO23ERFJLDPbFUIoSue17TbGbWYTzazGzGqOHj3apt9taGjbdRGRfNZuwR1CKA0hFIUQirp169am3+3Ro23XRUTyWVbMKpkzBzp1+vS1Tp38uoiIfFpWBHdxse8TddNNYOaPpaW6MSkicimtzioxs1eBEcCNZvYB8I8hhOXtXUhxsYJaRCQdrQZ3CGFcJgoREZH0ZMVQiYiIpE/BLSKSMApuEZGEUXCLiCSMgltEJGHS2qukzW9qdhS4xO4jabkRONaO5SSBvnPuy7fvC/rObXVTCCGtZeeRBPfVMLOadDdayRX6zrkv374v6DtHSUMlIiIJo+AWEUmYbAzu0rgLiIG+c+7Lt+8L+s6RyboxbhERubxs7LhFROQysia4M3UocTYxsy+b2VYz229me83sobhripKZXW9mO82sruX7PhF3TZliZteY2b+a2fq4a8kEMztkZvVmVmtm6Z9jmFBmdoOZ/dzMft3y//OfRfp52TJUYmZfA04BP0v3bMukM7PuQPcQwm4z+31gF/BXIYR9MZcWCTMz4IshhFNmdi3wL8BDIYS3Yy4tcmb2Y6AI+IMQwt1x1xM1MzsEFIUQ8mIet5mtAP5fCGGZmXUEOoUQTkT1eVnTcYcQtgP/FXcdmRRC+DCEsLvl55PAfuBP460qOsGdanl6bcuf7OgcImRmBcBfAsvirkXan5n9AfA1YDlACOE3UYY2ZFFw5zszKwQGADvirSRaLUMGtcARYHMIIae/b4sFwCNAc9yFZFAANpnZLjObGHcxEfsKcBT4actw2DIz+2KUH6jgzgJm1hn4BfCjEMLHcdcTpRBCUwihP1AADDKznB4WM7O7gSMhhF1x15JhQ0MIA4HRwA9ahkJz1ReAgcDiEMIA4DTw91F+oII7Zi1jvb8AUiGEsrjryZSWf0puA74RcylRGwp8q2XM9zXg62a2Kt6SohdCaGx5PAKUA4PirShSHwAf/Na/Hn+OB3lkFNwxarlZtxzYH0J4Pu56omZm3czshpaffw+4E/h1vFVFK4TwaAihIIRQCNwDvBVC+NuYy4qUmX2x5WY7LUMGdwE5O1sshPD/gffN7P+2XBoJRDrBoNUzJzMlU4cSZ5mhwHeB+pZxX4DHQggbYqwpSt2BFWZ2Dd40rAkh5MX0uDzzx0C59yV8AVgdQngj3pIiNxVItcwoeRe4L8oPy5rpgCIikh4NlYiIJIyCW0QkYRTcIiIJo+AWEUkYBbeISMIouEVEEkbBLSKSMApuEZGE+R+EhfZyAyHEYwAAAABJRU5ErkJggg==\n", "text/plain": [ - "" + "
    " ] }, - "metadata": {}, + "metadata": { + "needs_background": "light" + }, "output_type": "display_data" } ], @@ -318,8 +315,8 @@ " minibatches=len(y))\n", "sgd_lr.fit(X, y)\n", "\n", - "print('Intercept: %.2f' % sgd_lr.b_)\n", - "print('Slope: %.2f' % sgd_lr.w_)\n", + "print('Intercept: %.2f' % sgd_lr.w_)\n", + "print('Slope: %.2f' % sgd_lr.b_)\n", "\n", "def lin_regplot(X, y, model):\n", " plt.scatter(X, y, c='blue')\n", @@ -337,12 +334,14 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAk4AAAGGCAYAAACNCg6xAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAPYQAAD2EBqD+naQAAIABJREFUeJzt3XmYHFW9//H3N9tAwACSMCEYDMiVTUEmIITrDhgBhaui\nGOSKouK+xJ9y9XFBcYGrVyKoKIpeXDCK6AWVJQgu7CKZgAghbAkgkJBATCD7cn5/nG6nM8wkNTPd\n053K+/U89fR0dVX1qTrVVZ86VVMVKSUkSZK0aUOaXQBJkqTNhcFJkiSpIIOTJElSQQYnSZKkggxO\nkiRJBRmcJEmSCjI4SZIkFWRwkiRJKsjgJEmSVJDBSZIkqaCWCU4R8YGImBsRKyLi5og4aCPDvj4i\nroqIxyNiSUTcGBGv7mG4N0XE7Mo0b4+IIxs7F5IkqcxaIjhFxPHA14HTgAOA24EZETG6l1FeBlwF\nHAl0AH8EfhsR+9dM81DgZ8D3gRcBlwKXRMQ+jZoPSZJUbtEKD/mNiJuBv6SUPlJ5H8DDwDkppa8W\nnMbfgZ+nlL5Uef9zYGRK6ZiaYW4CZqWU3l/veZAkSeXX9BaniBgOTASuqfZLOc1dDUwqOI0AngU8\nWdN7UmUatWYUnaYkSVJ3TQ9OwGhgKLCgW/8FwNiC0/gEsA1wUU2/sQOcpiRJ0gaGNbsAAxURJwCf\nBY5JKS0a4LR2BCYD84CVAy+dJEkaZFsBE4AZKaUn6j3xVghOi4B1QHu3/u3A/I2NGBFvAb4HHJdS\n+mO3j+f3Y5qTgQs3VWBJktTy3kr+J7G6anpwSimtiYiZwGHAb+Bf1ywdBpzT23gRMQU4Hzg+pXRl\nD4Pc1MM0jqj07808gJ/+9KfsvffefZiL7L//Gzo74Re/6POo6sHUqVOZNm1as4uhCuujtVgfrcX6\naB2zZ8/mxBNPhMo+vd6aHpwqzgIuqASoW4CpwEjgAoCIOAMYl1I6qfL+hMpnHwb+GhHVlqUVKaWl\nlb/PBv4UER8DLgOmkC9Cf/dGyrESYO+996ajo6PPMzF+PNx2G/RjVPVgu+2261c9qDGsj9ZifbQW\n66MlNeSSm1a4OJyU0kXAx4HTgVnAfsDklNLCyiBjgfE1o7ybfEH5t4FHa7pv1EzzJuAE4BTgNuAN\nwLEppbsaNR9tbbBqVaOmLkmSmq1VWpxIKZ0LnNvLZ+/o9v6VBaf5K+BXAy9dMQYnSZLKrSVanMrC\n4CRJUrkZnOrI4FRfU6ZMaXYRVMP6aC3WR2uxPrYcBqc6MjjVlxui1mJ9tBbro7VYH1sOg1MdtbXB\nunW5kyRJ5WNwqqMRI/Lr6tXNLYckSWoMg1MdtbXlV0/XSZJUTganOjI4SZJUbganOjI4SZJUbgan\nOjI4SZJUbganOjI4SZJUbganOjI4SZJUbganOjI4SZJUbganOqoGJ+/jJElSORmc6qh6A0xbnCRJ\nKieDUx15qk6SpHIzONWRwUmSpHIzONWRwUmSpHIzONWRwUmSpHIzONXRkCEwbJjBSZKksjI41Vlb\nm8FJkqSyMjjVmcFJkqTyMjjVWVubN8CUJKmsDE51NmKELU6SJJWVwanOPFUnSVJ5GZzqzOAkSVJ5\nGZzqzOAkSVJ5GZzqzOAkSVJ5GZzqzOAkSVJ5GZzqzOAkSVJ5GZzqzPs4SZJUXganOrPFSZKk8jI4\n1Zk3wJQkqbwMTnVmi5MkSeVlcKozg5MkSeVlcKozg5MkSeVlcKozg5MkSeVlcKozg5MkSeVlcKoz\ng5MkSeVlcKozb4ApSVJ5GZzqzBYnSZLKy+BUZyNGwJo1sH59s0siSZLqzeBUZ21t+dXTdZIklY/B\nqc6qwcnTdZIklY/Bqc4MTpIklZfBqc4MTpIklZfBqc4MTpIklZfBqc4MTpIklZfBqc78rzpJksrL\n4FRntjhJklReBqc6GzEivxqcJEkqH4NTndniJElSeRmc6szgJElSeRmc6szgJElSeRmc6szgJElS\neRmc6szgJElSeRmc6mzo0Nx5HydJksrH4NQAbW22OEmSVEYGpwYwOEmSVE4GpwYYMcLgJElSGRmc\nGsAWJ0mSysng1AAGJ0mSysng1AAGJ0mSysng1AAGJ0mSysng1AAGJ0mSysng1ABtbd4AU5KkMjI4\nNYAtTpIklZPBqQEMTpIklZPBqQG8AaYkSeVkcGoAW5wkSSong1MDGJwkSSqnlglOEfGBiJgbESsi\n4uaIOGgjw46NiAsjYk5ErIuIs3oY5qSIWF/5fH2lW97YucgMTpIklVNLBKeIOB74OnAacABwOzAj\nIkb3Mkob8DjwReC2jUx6CTC2pntuvcq8MQYnSZLKqSWCEzAVOC+l9OOU0t3Ae4HlwMk9DZxSejCl\nNDWl9FNg6Uamm1JKC1NKj1e6hfUv+jMZnCRJKqemB6eIGA5MBK6p9kspJeBqYNIAJ79tRMyLiIci\n4pKI2GeA0yvEG2BKklROTQ9OwGhgKLCgW/8F5NNr/TWH3GJ1DPBW8rzeGBHjBjDNQmxxkiSpnIY1\nuwCNklK6Gbi5+j4ibgJmA+8hX0vVq6lTp7Lddttt0G/KlClMmTKl0HcbnCRJarzp06czffr0Dfot\nWbKkod/ZCsFpEbAOaO/Wvx2YX68vSSmtjYhZwB6bGnbatGl0dHT0+7u8AaYkSY3XU6NGZ2cnEydO\nbNh3Nv1UXUppDTATOKzaLyKi8v7Gen1PRAwBXgg8Vq9p9qZ6jVNKjf4mSZI0mFqhxQngLOCCiJgJ\n3EL+L7uRwAUAEXEGMC6ldFJ1hIjYHwhgW2BM5f3qlNLsyuefJZ+quw/YHjgV2BU4v9Ez09aWX1ev\n7vpbkiRt/loiOKWULqrcs+l08im624DJNbcPGAuM7zbaLKDaptMBnAA8COxe6bcD8L3KuIvJrVqT\nKrc7aKhqWFq1yuAkSVKZtERwAkgpnQuc28tn7+ih30ZPM6aUPgZ8rD6l65va4CRJksqj6dc4lVHt\nqTpJklQeBqcGsMVJkq
RyMjg1gMFJkqRyMjg1gMFJkqRyMjg1wIgR+dXgJElSuRicGsAWJ0mSysng\n1AAGJ0mSysng1AAGJ0mSysng1AAGJ0mSysng1ADeAFOSpHIyODWALU6SJJWTwakBhg2DIUMMTpIk\nlY3BqUHa2gxOkiSVjcGpQUaMMDhJklQ2BqcGscVJkqTyMTg1iMFJkqTyMTg1iMFJkqTyMTg1iMFJ\nkqTyMTg1SFubN8CUJKlsDE4NYouTJEnlY3BqEIOTJEnlY3BqEIOTJEnlY3BqEG+AKUlS+RicGsQW\nJ0mSysfg1CAGJ0mSysfg1CAGJ0mSysfg1CAGJ0mSysfg1CDeAFOSpPIxODWILU6SJJWPwalBDE6S\nJJWPwalBDE6SJJWPwalBvAGmJEnlY3BqEFucJEkqH4NTg1SDU0rNLokkSaoXg1ODtLXl1zVrmlsO\nSZJUPwanBqkGJ+/lJElSeRicGqQanLzOSZKk8jA4NYjBSZKk8jE4NYjBSZKk8jE4NUg1OK1c2dxy\nSJKk+jE4NcgOO+TXJ59sbjkkSVL9GJwapL09vy5Y0NxySJKk+jE4Nciznw1DhxqcJEkqE4NTgwwZ\nAjvtZHCSJKlMDE4N1N5ucJIkqUwMTg1kcJIkqVwMTg1kcJIkqVwMTg1kcJIkqVwMTg00dqzBSZKk\nMjE4NVB7Ozz9NCxb1uySSJKkejA4NZA3wZQkqVwMTg1kcJIkqVwMTg1kcJIkqVwMTg204475DuIG\nJ0mSysHg1EBDh8KYMQYnSZLKwuDUYN7LSZKk8jA4NZjBSZKk8jA4NZjBSZKk8jA4NVh7O8yf3+xS\nSJKkejA4NZiPXZEkqTwMTg3W3g5PPQUrVjS7JJIkaaAMTg3mTTAlSSqPfgWniPhcRIzsof/WEfG5\ngRerPAxOkiSVR39bnE4Dtu2h/8jKZ6owOEmSVB79DU4BpB767w882f/ilM/o0RBhcJIkqQyG9WXg\niFhMDkwJuCciasPTUHIr1HfrV7zN37BhOTwZnCRJ2vz1KTgBHyW3Nv2QfEpuSc1nq4F5KaWb6lS2\n0vAmmJIklUOfglNK6UcAETEXuCGltLYhpSoZb4IpSVI59Pcap6eAvatvIuLYiLgkIr4SESPqU7Ty\nsMVJkqRy6G9wOg94PkBE7A78AlgOvAn4an8mGBEfiIi5EbEiIm6OiIM2MuzYiLgwIuZExLqIOKuX\n4d4UEbMr07w9Io7sT9kGyruHS5JUDv0NTs8Hbqv8/SbgzymlE4C3A2/s68Qi4njg6+Trpg4Abgdm\nRMToXkZpAx4HvlhTju7TPBT4GfB94EXApcAlEbFPX8s3ULY4SZJUDgO5HUF13MOByyt/Pwz0FnY2\nZipwXkrpxymlu4H3kluwTu5p4JTSgymlqSmlnwJLe5nmh4ErUkpnpZTmpJQ+B3QCH+xH+QakvR2W\nLIGVKwf7myVJUj31NzjdCnwmIv4TeDlwWaX/bkCf2lYiYjgwEbim2i+llICrgUn9LB+Vca/u1m/G\nAKfZL9WbYD7++GB/syRJqqf+BqePAh3At4Avp5Tuq/Q/Drixj9MaTb4HVPfAtQAY28/yURm33tPs\nF+8eLklSOfT1Pk4ApJT+Brywh48+AawbUIlKyOAkSVI59Cs4VUXERLpuS3BXSqmzH5NZRA5b7d36\ntwMDufvR/P5Oc+rUqWy33XYb9JsyZQpTpkzpV0HGjMmvBidJkupn+vTpTJ8+fYN+S5Ys6WXo+uhX\ncIqInci3IHg58M9K7+0j4o/AW1JKC4tOK6W0JiJmAocBv6lMPyrvz+lP+Spu6mEaR1T6b9S0adPo\n6OgYwFdvaPhw2HFHb4IpSVI99dSo0dnZycSJExv2nf29xumb5OfS7ZtSenZK6dnAC4BR9C/snAW8\nOyLeFhF7kZ93NxK4ACAizoiIH9WOEBH7R8SLKuUYU3m/d80gZwOviYiPRcSeEfF58kXo3+pH+QbM\nWxJIkrT56++putcAh6eUZld7pJTuiogPAFf1dWIppYsq92w6nXw67TZgck3L1VhgfLfRZpEfNgz5\nQvUTgAeB3SvTvCkiTgC+XOnuBY5NKd3V1/LVg8FJkqTNX3+D0xBgTQ/919DPVqyU0rnAub189o4e\n+m3ye1JKvwJ+1Z/y1Ft7Ozz2WLNLIUmSBqK/p+r+AJwdEeOqPSJiF2AaNfdjUhcfuyJJ0uavv8Hp\ng+TrmeZFxP0RcT8wt9LvQ/UqXJl4qk6SpM1ff+/j9HBEdJAft7JXpffslFL3O3Wror0dFi+G1ath\nxIhml0aSJPVHn1qcIuJVEXFXRIxK2e9TSt9MKX0T+GtE3BkRkxtU1s2aj12RJGnz19dTdR8Fvp9S\nesaDdVNKS4Dz8FRdj7x7uCRJm7++Bqf9gSs38vlVwH79L055VYOTN8GUJGnz1dfg1E7PtyGoWguM\n6X9xymunnfKrLU6SJG2++hqcHiHfIbw3+wHeragHI0bADjsYnCRJ2pz1NThdDnwxIrbq/kFEbA18\nAfhdPQpWRt6SQJKkzVtfb0fwJeANwD0R8S1gTqX/XsAHgKHkx5uoBwYnSZI2b30KTimlBRFxKPAd\n4Awgqh8BM4APpJSMBr3w7uGSJG3e+nwDzJTSg8BREbEDsAc5PN2bUlpc78KVTXs73HFHs0shSZL6\nq78P+aUSlP5ax7KUnqfqJEnavPX3WXXqh/Z2eOIJWLOxGzpIkqSWZXAaRNWbYN5/f3PLIUmS+sfg\nNIgOPDDfCHPSJPjhDyGlZpdIjbZiBcyb1+xSSJLqxeA0iMaOhbvugmOOgXe+E444Ah54oNmlGrhV\nq+D22+G66wYvDK5dmy+0/8EP4Ctf2fRF9ynB008PTtkAFi2C00+H5z4X/u3f4Ne/HrzvliQ1Tr8v\nDlf/7Lgj/OhHcMIJ8J73wAteAF/4ApxyCmy3XbNLt2kpQWcnzJgBf/tbDiz33JODDMDrXgfnn9/1\niJl6WbkS/vQn+P3v4ZZbchmWL4chQ2DkSPj0p+GFL4S3vhWmTIFdd4V//AOuvjqPc8018PjjcPDB\ncOyxObzuvTdEbPKr++Tee2HaNLjggvz+5JPz8wmPPx5+/nN44xvr+33r1sGf/wyzZuX1Z4cdurpd\nd83rmySpfiJ5vuhfIqIDmDlz5kw6Ojoa/n1PPw2f/Syccw4MGwaveQ285S05fGy7bQ4pd98N116b\nW3PmzIFDDoEjj4RXvCIHhlpPPZVbfu66K4eKVau6upUrc79ly7peI+DQQ+Hww/NpxGG9xOh16+DG\nG3Orya9/DQ89BKNGwf7757BS7ebPh/e+N4eZH/4Qjj56YMtn/ny47DL43e9y+Fm2DMaPz6c6DzoI\nXvxi6OjIj7O58kq48EL4zW/yvI4fDw8/nOexoyO37u22Ww58M2bkae2xB7z+9bn1b889ey5DSjmo\n3XFHXu677NLzMNdeC9/4Blx6KYwZAx/6ELzvfTm4rF0LJ54IF18Mv/jFwMNTSnD
rrfCzn+XpPfZY\nXheWL99wuGHDcoj8+MdhvxI/envRoryOrFoFb3sbbL11sfFSyutIZydss03+HdQ7SG8OFi6E66/P\n61FbW1c3YkQ+sHve8zY+/vLleVlus83Ay/LII/lAcuXK/Js57DAYOnTg0y1i6dJ84DN3bj69Pncu\nPPggTJwI//Vfz9zeDqaU4IYb8rbkwAPz/qEsHnssnzno6ICjjqrPNDs7O5k4cSLAxJRSZ32m2sXg\nVGOwg1PVI4/AL3+Zd4I335w3/AcfDH//e94pDB0KBxyQd+7XX59/zG1t8PKX5/Bw331543/vvfkH\nNmQIbLVV7qobwa22yj/8kSPzBm7kyLxxuv76HLhGjYJXvjJ/79NP59aZanfffbkc48bloPGGN8DL\nXtZz0Jo/PweRyy/PweF//id//8MP54vi778/b6h33jmHm2o3fHhuwfrLX7q6e+7J8zJpErz2tTlQ\n7rPPxnduS5fC//1fboH593+HV73qma0uK1fCH/6QQ9bFF+f/dHzVq3J5jz02l2XRIvjpT/MP+u9/\nz+NFwEtfmluPjjsOtt8+19m0afn79tkHPvpR+M//zMu71tq1uf8vf5lbno47btPrxfz5OTg/+mhX\n99hjOTTdd1/+Z4Pjj8+tly9+MaxfD0uWwOLFubvuuly2hx+GyZPh1FNzHc+bl6dR7ZYuzTuoo47K\ny3r48E2XbTCsXw8zZ+Z1Zscdczd6dH5dsAAuuSQH1euv71rvd9oJPvMZeNe78o6/1ooVueXx2mtz\nfXV2wpNPdn0+aRKceWZet1vFww/nVtOnn8478Be9aGA78JTy9uOGG/JyuPbavI5B/j1XW46rIvJv\n4hOfyAdZtebMgW99K7eurl+fD/pOOSWvi7W/0fXru1qp29rygcNuu204rRUr8rbizDPz9mnHHXO5\nxo3LAeqkk/Lvqx5SyuvPHXfkclW7++7rGmbbbXMZd9kF/vjH/Fs7++y8LPoTrletyst97tx8ycZ+\n+xWfzg035Bb1P/85vx8yJB+oHnJI7g4/HJ7znL6Xqdnmz4evfhW+8528jqxenbeLZ5+d630gDE6D\nqFnBqda8eXDRRfnHst9+eSN+yCHwrGflz1PKG6wrrsitLLNm5UDV0ZG7Aw7Ip6CK7vzWrs07z6uv\nzt1tt+XTPGPG5J3QTjvlH+VRR+UN4pACV8WlBOedBx/7WN5QLlvWdQuGIUPy9J94YsNxhg7NLVvD\nhuWdw8EH5x3Z5Ml5Z9koK1fCr36Vf7w33JA3agceCFddlefj2GPzTvigg+C3v82h5/e/z59V52Py\nZJg6FV796o1vDNeuzS0iF10E3/wm7LVXHr7aLVmSN+AzZ+bu0Ue7xh01Km9Mdt45XzP1pjflVsfe\nWgmr1qzJ3/e1r+XWyNqWqfHj87yOHJnnd+HC/D2vfnVe9ltvncPHiBF5fWpr6wrdtQF8q63ysFtv\n3XvrQPUas8WL4Z//zK9r1nQFodGj8/hLl+ayXHZZXsc3dt+ztra80/iP/8ih+qmncmvFhRfma8s+\n97kciK+8MrdIXXNN3kGPH59DyAEHdP1m7roLPvnJvPyPPBLOOCO3qFYtX56Xz7p1+TexzTYb1vXy\n5V074lmz8rC1y2mbbfI87rFHrr/ddsvlr7V8eV6fbr89r2NXXZXDQ0Re/qtX5+W7zz55fRw/Pver\ndmvW5OHGjs1de3t+Xbx4wwOSxx/P37fPPvlA4GUvy6/jx3ftwKqt1L/7XV535szJwekTn8jlPuec\nvFzHjMmXHIwYkQ8yHnww79RPOSXX6xVX5MD0+ON5G7Z2ba6DAw+EN7857yj/+tcc6h99FD7ykRx8\nR43K26Uf/QimT88Bd++9u8r6spfl8lbXrcWL87Zz3ry8fq1Zk79rzZrcLVjQdeD2wAN5mwQ5IB1w\nQF4fOjryd+y2Gzz72V31e//9uQX5iivydvCcc3puhasGsjvvzAdbd96ZD2YfeCAH4NpdbXt7bgWf\nPDm/Vv/jutasWXlZXH55Xhe/9CWYMCEfXFe7u+7K0500KW8Tjjuua7k8+mjXenTNNfl3Wl3fq687\n79z3IPjkk3l92HrrXJ7tt+95uJUr84He6tV5uz90aH5dvTrvH77znbzeTJ2a6/2KK/KB58qVOUC/\n5z3F9jc9MTgNolYITmUyZw785Cf5x/m85+Xuuc/NP5bVq3NL20MP5Y3KU0/lwHTAAc9srRksd9wB\n3/1uDo/HHZePdMeMeeZwixblU5b33ZePhPfdt/h3rF0Lb3973rn3ZMcduzbiEyfmndAuuwy8aT6l\nHIxvvTVvhCdO3HBjXW3dueKK3P3tb7mOurdAbMrw4V0tPbWbliLTGjmya7h9982neo8+Opd38eIc\nKhYtyq8jR+bQ1NNyufNOOO20HIghb7Bf8pIcrl77Wnj+83veWaxfn8f59KfzDm+//fJOeNGiZ54G\n3XrrroOL5ctzwFm/PgfZfffNIXfFiryDrp4eX7Ag94O8Q9h11xwQnngidytXdk1/woS8Q331q3P4\n23bbPF/VVsK//jVPr3pKrdqtWJH7dz8wGTUqH/gcfHDuDjmk53W7J+vXdwWo66/P/To68s7uzW/u\n+r2uW5fXse99L7fmrl2b6+7II/NlCIcemgPZZZflMH/55V3z/LrXwde/nkNld6tW5WGvvDK3os6e\n3bWMRo3KYWnp0g3HqYbNYcPy6+jRXdugarf33vn7ih4MXnppnucFC/LyW7cuz2O1e/jhruXe1pYP\njPbcM3/X7rvnbrfdcqvTjBk50Nx2Wx5+++27gvbIkXmdnTUrl++LX8yhqKdy/vOfuW4uuihPc/Xq\nXL/LluXwVnupwtq1XS2tiyvP+RgyJC/DZz1rw9fttsuv1X7V1u/Zs/NBQa3tt8/zNWFCrs9HHsld\n93Ww1qhROSRNnbph8HryyXxa9Pzzcxg89tgNz55stVWu02oIGzo0d91/z/fd18mHPmRwGhQGJw2G\nlHJgXLs275RS6rpGZJddWus6m/Xr8xH76tV5o7hixYbXyi1fnvvVdqtXd81D9XX48A0vXN9hh7wB\nrIaGRYtyt9VWeSc7YcLAy37bbTncHnZY/r6i1qzJp59uvTXvcEePziFjzJi8kV64cMNT2cOHdx3F\n77tv78F//fp8BH7vvV3dsmVdrW7Vbo898s52IOvBmjW5bPPn5x3xnnv2/+i91q235sDQ/XRcdwsX\n5vV75517H+bpp3NI32mnfNlBUdVrsq67Lq9vu+3WteOeMCEvw3rMa0+WLYOzzsoHhUOH5nW4uiMf\nNy7X/7775vorcm3W/Pm5NeiRR7p+S9XXQw/NLdSbalWuWrIkt4pfckkOO5Mn53W/e0Cubn9mzcoh\ncOnSfOC6dGnv3ejROWjutVd+ff7zc6CtvR5s3ry87u+yS1c3blw+yFi3Lq//69bl7z/ooI3/Jq+7\nLoequXO7tjt9iyqdgMFpUBicJElqLS
nlIL5yZQ5ftUFs3boNhwP42986OfroxgUnb0cgSZJaVvXU\na9Frd6vX8TWKN8CUJEkqyOAkSZJUkMFJkiSpIIOTJElSQQYnSZKkggxOkiRJBRmcJEmSCjI4SZIk\nFWRwkiRJKsjgJEmSVJDBSZIkqSCDkyRJUkEGJ0mSpIIMTpIkSQUZnCRJkgoyOEmSJBVkcJIkSSrI\n4CRJklSQwUmSJKkgg5MkSVJBBidJkqSCDE6SJEkFGZwkSZIKMjhJkiQVZHCSJEkqyOAkSZJUkMFJ\nkiSpIIOTJElSQQYnSZKkggxOkiRJBRmcJEmSCjI4SZIkFWRwkiRJKsjgJEmSVJDBSZIkqSCDkyRJ\nUkEGJ0mSpIIMTpIkSQUZnCRJkgoyOEmSJBVkcJIkSSrI4CRJklSQwUmSJKkgg5MkSVJBBidJkqSC\nWiY4RcQHImJuRKyIiJsj4qBNDP+KiJgZESsj4p6IOKnb5ydFxPqIWFd5XR8Ryxs7F5IkqcxaIjhF\nxPHA14HTgAOA24EZETG6l+EnAL8DrgH2B84Gzo+II7oNugQYW9M9twHFlyRJW4iWCE7AVOC8lNKP\nU0p3A+8FlgMn9zL8+4AHUkqnppTmpJS+DVxcmU6tlFJamFJ6vNItbNgcSJKk0mt6cIqI4cBEcusR\nkNMOcDUwqZfRDql8XmtGD8NvGxHzIuKhiLgkIvapU7ElSdIWqOnBCRgNDAUWdOu/gHx6rSdjexl+\nVES0Vd7PIbdYHQO8lTyvN0bEuHoUWpIkbXmGNbsAjZJSuhm4ufo+Im4CZgPvIV9LJUmS1CetEJwW\nAeuA9m7924H5vYwzv5fhl6aUVvU0QkppbUTMAvbYVIGmTp3Kdtttt0G/KVOmMGXKlE2NKkmSBsn0\n6dOZPn36Bv2WLFnS0O+MfDlRc0XEzcBfUkofqbwP4CHgnJTS13oY/kzgyJTS/jX9fgZsn1I6qpfv\nGALcCVyWUvp4L8N0ADNnzpxJR0fHQGdLkiQNss7OTiZOnAgwMaXUWe/pt8I1TgBnAe+OiLdFxF7A\nd4GRwAUAEXFGRPyoZvjvArtHxH9HxJ4R8X7guMp0qIzz2Yg4IiJ2i4gDgAuBXYHzB2eWJElS2bTC\nqTpSShfBkM95AAAMEUlEQVRV7tl0OvmU223A5JrbB4wFxtcMPy8ijgamAR8G/gG8M6VU+592OwDf\nq4y7GJgJTKrc7kCSJKnPWiI4AaSUzgXO7eWzd/TQ71rybQx6m97HgI/VrYCSJGmL1yqn6iRJklqe\nwUmSJKkgg5MkSVJBBidJkqSCDE6SJEkFGZwkSZIKMjhJkiQVZHCSJEkqyOAkSZJUkMFJkiSpIIOT\nJElSQQYnSZKkggxOkiRJBRmcJEmSCjI4SZIkFWRwkiRJKsjgJEmSVJDBSZIkqSCDkyRJUkEGJ0mS\npIIMTpIkSQUZnCRJkgoyOEmSJBVkcJIkSSrI4CRJklSQwUmSJKkgg5MkSVJBBidJkqSCDE6SJEkF\nGZwkSZIKMjhJkiQVZHCSJEkqyOAkSZJUkMFJkiSpIIOTJElSQQYnSZKkggxOkiRJBRmcJEmSCjI4\nSZIkFWRwkiRJKsjgJEmSVJDBSZIkqSCDkyRJUkEGJ0mSpIIMTpIkSQUZnCRJkgoyOEmSJBVkcJIk\nSSrI4CRJklSQwUmSJKkgg5MkSVJBBidJkqSCDE6SJEkFGZwkSZIKMjhJkiQVZHCSJEkqyOAkSZJU\nkMFJkiSpIIOTJElSQQYnSZKkggxOkiRJBRmcJEmSCjI4SZIkFWRwkiRJKsjgJEmSVJDBSZIkqSCD\nkyRJUkEGJ0mSpIIMTpIkSQUZnCRJkgpqmeAUER+IiLkRsSIibo6IgzYx/CsiYmZErIyIeyLipB6G\neVNEzK5M8/aIOLJxc6B6mz59erOLoBrWR2uxPlqL9bHlaIngFBHHA18HTgMOAG4HZkTE6F6GnwD8\nDrgG2B84Gzg/Io6oGeZQ4GfA94EXAZcCl0TEPg2bEdWVG6LWYn20FuujtVgfW46WCE7AVOC8lNKP\nU0p3A+8FlgMn9zL8+4AHUkqnppTmpJS+DVxcmU7Vh4ErUkpnVYb5HNAJfLBxsyFJksqs6cEpIoYD\nE8mtRwCklBJwNTCpl9EOqXxea0a34ScVGEaSJKmwpgcnYDQwFFjQrf8CYGwv44ztZfhREdG2iWF6\nm6YkSdJGDWt2AVrMVgCzZ89udjkELFmyhM7OzmYXQxXWR2uxPlqL9dE6avbhWzVi+q0QnBYB64D2\nbv3bgfm9jDO/l+GXppRWbWKY3qYJMAHgxBNP3HiJNWgmTpzY7CKohvXRWqyP1mJ9tJwJwI31nmjT\ng1NKaU1EzAQOA34DEBFReX9OL6PdBHS/tcCrK/1rh+k+jSO6DdPdDOCtwDxgZbE5kCRJLWQrcmia\n0YiJR74Ou7ki4s3ABeT/pruF/N9xxwF7pZQWRsQZwLiU0kmV4ScAdwDnAj8kB6RvAEellK6uDDMJ\n+BPwKeAyYArwSaAjpXTXIM2aJEkqkaa3OAGklC6q3LPpdPLptNuAySmlhZVBxgLja4afFxFHA9PI\ntx34B/DOamiqDHNTRJwAfLnS3Qsca2iSJEn91RItTpIkSZuDVrgdgSRJ0mbB4FTR12flaeAi4lMR\ncUtELI2IBRHxfxHx/B6GOz0iHo2I5RHx+4jYoxnl3dJExCcjYn1EnNWtv/UxSCJiXET8JCIWVZb3\n7RHR0W0Y62MQRMSQiPhiRDxQWdb3RcRnehjO+miAiHhpRPwmIh6pbJeO6WGYjS77iGiLiG9Xfk9P\nRcTFEbFTX8ticKLvz8pT3bwU+CZwMHA4MBy4KiK2rg4QEf9FfkzOKcCLgWXkuhkx+MXdclQOHE4h\n/xZq+1sfgyQitgduAFYBk4G9gf8HLK4ZxvoYPJ8E3gO8H9gLOBU4NSL+9Rgv66OhtiFf//x+4BnX\nGBVc9t8AjgbeCLwMGAf8qs8lSSlt8R1wM3B2zfsgX3B+arPLtiV15LvIrwdeUtPvUWBqzftRwArg\nzc0ub1k7YFtgDvAq4I/AWdZHU+rhTODPmxjG+hi8+vgt8P1u/S4Gfmx9DHpdrAeO6dZvo8u+8n4V\n8PqaYfasTOvFffn+Lb7FqZ/PylNjbE8+kngSICJ2I/9HZW3dLAX+gnXTSN8GfptS+kNtT+tj0L0O\nuDUiLqqcyu6MiHdVP7Q+Bt2NwGER8W8AEbE/8O/A5ZX31keTFFz2B5LvJFA7zBzgIfpYPy1xO4Im\n29iz8vYc/OJsmSo3Pf0GcH3qumXEWHKQ8pmDgyQi3gK8iLyR6c76GFy7A+8jX0bwZfLph3MiYlVK\n6SdYH4PtTHKrxd0RsY58qcunU0o/r3xufTRPkWXfDqyuBKrehinE4KRWcS6wD/kITk0QEc8hh9fD\nU0prml0eMQS4JaX02cr72yPiBeQbBf+kecXaYh0PnAC8BbiLfIBxdkQ8Wgmy2kJs8afq6N+z8lR
H\nEfEt4CjgFSmlx2o+mk++3sy6GRwTgTFAZ0SsiYg1wMuBj0TEavKRmfUxeB4Duj9xfDawa+Vvfx+D\n66vAmSmlX6aU7kwpXUi+CfOnKp9bH81TZNnPB0ZExKiNDFPIFh+cKkfW1WflARs8K6/uDwfUhiqh\n6VjglSmlh2o/SynNJa/QtXUzivxfeNZN/V0NvJB8JL1/pbsV+Cmwf0rpAayPwXQDz7xcYE/gQfD3\n0QQjyQfZtdZT2Y9aH81TcNnPBNZ2G2ZP8oHIxp5h+wyeqsvOAi6oPGy4+qy8keTn56lBIuJc8jME\njwGWRUT1aGFJSqn6kOVvAJ+JiPvID1/+Ivk/Hi8d5OKWXkppGfkUxL9ExDLgiZRSteXD+hg804Ab\nIuJTwEXkncC7gHfXDGN9DJ7fkpf1P4A7gQ7yvuL8mmGsjwaJiG2APcgtSwC7Vy7QfzKl9DCbWPYp\npaUR8QPgrIhYDDwFnAPckFK6pU+Fafa/FbZKR743xDzyvy/eBBzY7DKVvSMfra3roXtbt+E+T/5X\n0+Xkp13v0eyybykd8AdqbkdgfQz68j8K+FtlWd8JnNzDMNbH4NTFNuSD7LnkewTdC3wBGGZ9DMry\nf3kv+4wfFl32QBv53oGLKsHpl8BOfS2Lz6qTJEkqaIu/xkmSJKkog5MkSVJBBidJkqSCDE6SJEkF\nGZwkSZIKMjhJkiQVZHCSJEkqyOAkSZJUkMFJkiSpIIOTJPUgItZHxDHNLoek1mJwktRyIuJ/K8Fl\nXeW1+vflzS6bpC3bsGYXQJJ6cQXwdrqehg6wqjlFkaTMFidJrWpVSmlhSunxmm4J/Os02nsj4vKI\nWB4R90fEG2tHjogXRMQ1lc8XRcR5EbFNt2FOjoi/R8TKiHgkIs7pVoYxEfHriFgWEfdExOtqxt0+\nIi6MiMcr3zEnIk5q2NKQ1BIMTpI2V6cDvwT2Ay4Efh4RewJExEhgBvAEMBE4Djgc+GZ15Ih4H/At\n4LvAvsDRwD3dvuNzwM+BFwKXAxdGxPaVz74E7AVMrry+D1hU75mU1FoipdTsMkjSBiLif4ETgZU1\nvRPwlZTSmRGxHjg3pfTBmnFuAmamlD4YEe8GzgCek1JaWfn8SOC3wM4ppYUR8Q/gByml03opw3rg\n9JTS5yvvRwJPA69JKV0VEZcCC1NK76rv3EtqZV7jJKlV/QF4Lxte4/Rkzd83dxv+JmD/yt97AbdX\nQ1PFDeRW9j0jAmBc5Ts25o7qHyml5RGxFNip0us7wK8iYiJwFXBJSummTc2UpM2bwUlSq1qWUprb\noGmvKDjcmm7vE5VLHFJKV0bErsBRwBHA1RHx7ZTSqfUrpqRW4zVOkjZXh/Twfnbl79nA/hGxdc3n\nLwHWAXenlJ4G5gGHDaQAKaUnUko/SSm9DZgKnDKQ6UlqfbY4SWpVbRHR3q3f2pTSE5W/3xQRM4Hr\nyddDHQScXPnsQuDzwI8i4gvk02vnAD9OKVUv4P488J2IWEi+9cEo4NCU0reKFK4y3ZnAncBWwGuB\nu/o6k5I2LwYnSa3qNcCj3frNAfap/H0a8Bbg28BjwFtSSncDpJRWRMRk4GzgFmA5cDHw/6oTSin9\nOCLayC1FXyP/R9zFNd/V03/OpJr+q4GvABPIp/6uA6b0Yz4lbUb8rzpJm53Kf7z9R0rpN80ui6Qt\ni9c4SZIkFWRwkrQ5sqlcUlN4qk6SJKkgW5wkSZIKMjhJkiQVZHCSJEkqyOAkSZJUkMFJkiSpIIOT\nJElSQQYnSZKkggxOkiRJBRmcJEmSCvr/YSwKaCdrwd8AAAAASUVORK5CYII=\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAagAAAEYCAYAAAAJeGK1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4xLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvDW2N/gAAIABJREFUeJzt3Xt0XNWd5v3vU1Uq2ZZ808XG2MY22MSYSwwIh4aEDhAS6MkA0w0BFp2QTGZ4m7eZvmS6J9AzSa9hkpmX1e+apLOaYUJ3yD0hNJNM3A2Jk3BJejpcLHMztnEQxsbyVbZl2bJsSSX95o86MmUh2xI6ZRXy81mrlursc9E+pVI9tffZtUsRgZmZWaXJjHUFzMzMhuKAMjOziuSAMjOziuSAMjOziuSAMjOziuSAMjOzilTWgJJ0taT1klok3TXE+s9IWivpZUmPS5pXsu42Sa8lt9tKyi+UtDo55lckqZznYGZmY0Pl+hyUpCzwG+AqoBVYCdwSEWtLtrkceDYiuiTdAXwwIm6SVAc0A01AAKuACyOiXdJzwB8DzwCPAV+JiJ+U5STMzGzMlLMFtQxoiYgNEdEDPARcV7pBRDwZEV3J4jPAnOT+R4CfR8SeiGgHfg5cLWkWMCUino5isn4LuL6M52BmZmMkV8ZjzwY2lyy3Au87xvafBgZaQkPtOzu5tQ5R/jaSbgduB6ipqblw8eLFI6n7sPT1B2u37WPW1Ak01Fanfnwzs/Fo1apVuyKi8XjblTOghro2NGR/oqTfp9id99vH2XfYx4yIB4AHAJqamqK5ufl49R2xA90Fzv7LFfzF7yzm9svOSP34ZmbjkaRNw9munF18rcDckuU5wNbBG0n6EPAfgWsjovs4+7byVjfgUY95olRliw9fb5/nMzQzS1s5A2olsEjSAkl54GZgeekGks4HvkoxnHaWrFoBfFjSdEnTgQ8DKyJiG7Bf0sXJ6L1PAD8u4zkcU1W22KDrLvSPVRXMzMatsnXxRURB0p0UwyYLPBgRayTdAzRHxHLgr4Ba4O+T0eJvRsS1EbFH0n+hGHIA90TEnuT+HcA3gIkUr1mN2Qg+SeSzGXr7HFBmZmkr5zUoIuIxikPBS8s+X3L/Q8fY90HgwSHKm4FzUqzmqFRlRa9bUGZmqfNMEqNUlcvQ4xaUmVnqHFCj5C4+M7PycECNUlU2Q0/Bo/jMzNLmgBqlvLv4zMzKwgE1SvlsxoMkzMzKwAE1SlU5+RqUmVkZOKBGqSrrLj4zs3JwQI1ScZCEA8rMLG0OqFGqznmYuZlZOTigRsldfGZm5eGAGqXiVEf+HJSZWdocUKOUz2XdxWdmVgYOqFGqyspdfGZmZeCAGqW8R/GZmZWFA2qU8h7FZ2ZWFg6oUarKZvyV72ZmZeCAGiV/UNfMrDwcUKM0MJt5hFtRZmZpckCNUj4rAAr9DigzszQ5oEapKlt8CN3NZ2aWrrIGlKSrJa2X1CLpriHWXybpeUkFSTeUlF8u6cWS2yFJ1yfrviHpjZJ1S8t5DsczEFAeyWdmlq5cuQ4sKQvcB1wFtAIrJS2PiLUlm70JfBL4s9J9I+JJYGlynDqgBfhZySZ/HhGPlKvuI5HPJS0oB5SZWarKFlDAMqAlIjYASHoIuA44HFARsTFZd6xX9xuAn0REV/mq+s7l3cVnZlYW5ezimw1sLlluTcpG6mbg+4PKvijpZUlfklQ91E6SbpfULKm5ra3tHfza4anKFQdJ+LNQZmbpKmdAaYiyEb2KS5oFnAusKCm+G1gMXATUAZ8dat+IeCAimiKiqbGxcSS/dkTy2Szga1BmZmkrZ0C1AnNLlucAW0d4jI8BP4qI3oGCiNgWRd3A1yl2JY6ZqmSYubv4zMzSVc6AWgkskrRAUp5iV93yER7jFgZ17yWtKiQJuB54JYW6vmNVHiRhZlYWZQuoiCgAd1LsnlsHPBwRayTdI+laAEkXSWoFbgS+KmnNwP6S5lNsgf1y0KG/K2k1sBpoAL5QrnMYjuqBYeZuQZmZpaqco/iIiMeAxwaVfb7k/kqKXX9D7buRIQZVRMQV6dZydAZaUB4kYWaWLs8kMUqHZ5Lo6xvjmpiZjS8OqFF663NQbkGZmaXJATVK+cOfg/I1KDOzNDmgRsmTxZqZlYcDapQ8WayZWXk4oEYpn3NAmZmVgwNqlAZaUN3u4jMzS5UDapTyWX8OysysHBxQo+QuPjOz8nBAjVI2IzLyKD4zs7Q5oFJQlc24BWVmljIHVAryuYxnMzczS5kDKgX5bMZdfGZmKXNApcBdfGZm6XNApSCfy3iYuZlZyhxQKajKytegzMxS5oBKQZWvQZmZpc4BlYJiF58DyswsTQ6oFOQ9SMLMLHUOqBS4i8/MLH1lDShJV0taL6lF0l1DrL9M0vOSCpJuGLSuT9KLyW15SfkCSc9Kek3SDyTly3kOw1GVy9DjUXxmZqkqW0BJygL3AdcAS4BbJC0ZtNmbwCeB7w1xiIMRsTS5XVtSfi/wpYhYBLQDn0698iOUz2bodQvKzCxV5WxBLQNaImJDRPQADwHXlW4QERsj4mVgWK/ukgRcATySFH0TuD69Kr8z+ZyHmZuZpa2cATUb2Fyy3JqUDdcESc2SnpE0EEL1wN6IKBzvmJJuT/ZvbmtrG2ndR8QzSZiZpS9XxmNriLKRXKg5LSK2SjodeELSamDfcI8ZEQ8ADwA0NTWV9QKRu/jMzNJXzhZUKzC3ZHkOsHW4O0fE1uTnBuAp4HxgFzBN0kCwjuiY5VLl2czNzFJXzoBaCSxKRt3lgZuB5cfZBwBJ0yVVJ/cbgEuBtRERwJPAwIi/24Afp17zEfJs5mZm6StbQCXXie4EVgDrgIcjYo2keyRdCyDpIkmtwI3AVyWtSXY/C2iW9BLFQPr/ImJtsu6zwGcktVC8JvW1cp3DcHmyWDOz9JXzGhQR8Rjw2KCyz5fcX0mxm27wfr8Gzj3KMTdQHCFYMTxZrJlZ+jyTRAqqshn6+oO+freizMzS4oBKQVW2+DB6qLmZWXocUCmozjmgzMzS5oBKwUALyiP5zMzS44BKwVtdfL4GZWaWFgdUCvLu4jMzS50DKgVV2eKsTt3u4jMzS40DKgV5j+IzM0udAyoF7uIzM0ufAyoFHsVnZpY+B1QKDgeUW1BmZqlxQKXgrS4+DzM3M0uLAyoFeXfxmZmlzgGVgqpccZi5B0mYmaXHAZUCTxZrZpY+B1QKBrr4/EFdM7P0OKBS4M9BmZmlzwGVgsNdfG5BmZmlxgGVAg8zNzNLnwMqBQOTxfqDumZm6SlrQEm6WtJ6SS2S7hpi/WWSnpdUkHRDSflSSU9LWiPpZUk3laz7hqQ3JL2Y3JaW8xyGoyrjz0GZmaUtV64DS8oC9wFXAa3ASknLI2JtyWZvAp8E/mzQ7l3AJyLiNUmnAqskrYiIvcn6P4+IR8pV95HKZERVVh4kYWaWorIFFLAMaImIDQCSHgKuAw4HVERsTNYd8coeEb8pub9V0k6gEdhLharKZtyCMjNLUTm7+GYDm0uWW5
OyEZG0DMgDr5cUfzHp+vuSpOqj7He7pGZJzW1tbSP9tSNWlc24BWVmlqJyBpSGKBvRMDdJs4BvA5+KiIFX/7uBxcBFQB3w2aH2jYgHIqIpIpoaGxtH8mvfkXwuQ49H8ZmZpaacAdUKzC1ZngNsHe7OkqYAjwL/KSKeGSiPiG1R1A18nWJX4pjLu4vPzCxV5QyolcAiSQsk5YGbgeXD2THZ/kfAtyLi7wetm5X8FHA98EqqtX6HqrLyMHMzsxSVLaAiogDcCawA1gEPR8QaSfdIuhZA0kWSWoEbga9KWpPs/jHgMuCTQwwn/66k1cBqoAH4QrnOYSSmTKyi42DvWFfDzGzcKOcoPiLiMeCxQWWfL7m/kmLX3+D9vgN85yjHvCLlaqairibPrs7usa6Gmdm44ZkkUlJXk2dPZ89YV8PMbNxwQKWkobaa3Qd6iPBIPjOzNDigUlJXk6e70E9XT99YV8XMbFxwQKWkriYPwJ4D7uYzM0uDAyol9UlAeaCEmVk6HFApcQvKzCxdDqiUNNQWpwTc7YAyM0uFAyolbkGZmaXLAZWSSfks1bmMA8rMLCUOqJRIot6zSZiZpcYBlaK62rxbUGZmKXFApai+ptoBZWaWEgdUiupr8uz2fHxmZqlwQKWorsZdfGZmaXFApaiuNs/B3j66egpjXRUzs3e9YQWUpG8Pp+xkNzDdkbv5zMxGb7gtqLNLFyRlgQvTr867W31NcTYJd/OZmY3eMQNK0t2S9gPnSdqX3PYDO4Efn5AavovU1Xo2CTOztBwzoCLiv0XEZOCvImJKcpscEfURcfcJquO7xuEuPgeUmdmoDbeL7x8l1QBI+n1J/13SvDLW613prfn4PJuEmdloDTeg7ge6JL0X+A/AJuBbx9tJ0tWS1ktqkXTXEOsvk/S8pIKkGwatu03Sa8nttpLyCyWtTo75FUka5jmUXW11jnw240ESZmYpGG5AFSIigOuAv46IvwYmH2uHZCDFfcA1wBLgFklLBm32JvBJ4HuD9q0D/hJ4H7AM+EtJ05PV9wO3A4uS29XDPIeyk0R9bd5dfGZmKRhuQO2XdDfwceDRJHyqjrPPMqAlIjZERA/wEMWAOywiNkbEy0D/oH0/Avw8IvZERDvwc+BqSbOAKRHxdBKY3wKuH+Y5nBD+sK6ZWTqGG1A3Ad3Av46I7cBs4K+Os89sYHPJcmtSNhxH23d2cv+4x5R0u6RmSc1tbW3D/LWjV1fjFpSZWRqGFVBJKH0XmCrpo8ChiDjeNaihrg3FMOt1tH2HfcyIeCAimiKiqbGxcZi/dvTqa/IeJGFmloLhziTxMeA54EbgY8Czgwc1DKEVmFuyPAfYOsx6HW3f1uT+OznmCVFXU80eD5IwMxu14Xbx/Ufgooi4LSI+QfH60ueOs89KYJGkBZLywM3A8mH+vhXAhyVNTwZHfBhYERHbKF4PuzgZvfcJKuwDw/W1eQ709HGot2+sq2Jm9q423IDKRMTOkuXdx9s3IgrAnRTDZh3wcESskXSPpGsBJF0kqZViy+yrktYk++4B/gvFkFsJ3JOUAdwB/B3QArwO/GSY53BC+MO6ZmbpyA1zu59KWgF8P1m+CXjseDtFxGODt4uIz5fcX8mRXXal2z0IPDhEeTNwzjDrfcId/rBuZw+zp00c49qYmb17HTOgJC0EZkbEn0v6XeD9FAcqPE1x0IQNUl870ILyQAkzs9E4Xhffl4H9ABHxw4j4TET8KcVW0ZfLXbl3ozrPaG5mlorjBdT85IO0R0i62eaXpUbvcodbUB7JZ2Y2KscLqAnHWOcLLEOYXJ2jKisPkjAzG6XjBdRKSf92cKGkTwOrylOldzdJyXRHvgZlZjYaxxvF9yfAjyTdyluB1ATkgX9Vzoq9m9XVVPsalJnZKB0zoCJiB3CJpMt5a2j3oxHxRNlr9i5W7/n4zMxGbVifg4qIJ4Eny1yXcaOuJs/m9q6xroaZ2bvacGeSsBGYXz+JTbu7+K+PraO74CmP3qntHYdY3dox1tUwszEy3JkkbATu+OBCdh/o4YFfbeCfXtvFl29ayntOOeb3O6au0NfPazs7WbN1H2u2dtDfH/zJh85kejLTxUjt7uymeVM7z29q5/k325k6Mc/HmuZw+eIZVGUzdBf6eO6NPby0eS8Xzqtj2YI6spl39mXHW/Ye5P6nWnh4ZSuF/n7+x60XcPU5s97Rsbp6CuQyGfI5vxcze7dR8Xv/xrempqZobm4+4b/38XU7+Oz/epl9BwtcdfZMbrhwDh9Y2EChP3h+Uzu/fn03AFeeNYP3zplGJnlB37HvEGu37mN/d4Hu3j66C/0c6u3jQHcfXb0FJlfnuOzMRs45derhfQD6+4OVG/ew/KWtPLZ6G+1dvQBMqMrQ1x/MmDyB+269gKVzpw2r/lv3HuSnr2znJ69so3lTOxGQz2Y4e/YUWtsP0ra/m4baas6ZPYXn3thDV89brcWG2mquOecUPtY0l3PnTD3iuG37u3llawfvW1DHpPxb75He3N3F/b98nUdWFb8K7Mamuby6bR+vbNnHNz51EZcsbBhWvbsLffxi7U7+ftVmfvWbNvoDavJZpk3Kc/Hp9dx5xUIWNNQM61gnQkSweksHO/d1c/niGUcN9kO9fazbto+a6hxnzjyxb3hGqrO7wPOb2jnY20d1rvgG4YzGWmZOefsnV/Yf6qW2Okdx/ufj6+jq5TvPbuK0ukl85OxTRv3mo78/2LL3IBt3H2Dj7i62dxzkmnNmcc7sqcffeQQigpdbO2icXM2pFTANWnehj394aRtN86Yz/wT/P0haFRFNx93OAVVeuzq7+ZsnWvjxi1to7+qlviZPZ3eB7kL/4ReiYnhUc/apU3h1+362dRw66vHyuQy9ff1EQENtnovm19HV00fb/m62dRykvauXiVVZPrRkJlcunsE5s6ewoKGWNVs7uOM7z7Nz/yE+e/ViZk2dyIa2Tt7YfYDqXJY50ycyZ/pEegr9NG9sZ+WmPWxoOwDA4lMmc/U5p/CBRQ2cfepUJlRlKfT189T6Nh5auZkNbZ381hn1XLF4BuefNp2nX9/NY6u38firOzjU28+yBXX8m/cvoGFyNd/69UYeXb2N3r6gJp/ld86dxVVLZvLTV7bz45e2kpX42EVzuOODC5k9bSJ7u3q46avP0Nrexfdvv5jz5rwVrv39wYZdB3hx8142tHWyveMQ2zoOsW77PvZ29TJr6gSuWzqbmnyW9q5e2jq7+fna7fQU+rl+6Ww+tGQma7Z28PymvWzafYBlC+r48Nmn8NtnNlJTnW7nQn9/8Pyb7Ty1vg2peJ2yribPb3bs5x9f3sam3V2HH+vPfXQJly5soC95w7FizXZWbWpn3bZ99PYV/19/9/zZfPaaxUO+4A/W1x/s7ephUj7HhKrMMYMgIni9rZPH1+3ktZ2dnDVrChfOm86SWVPI5zJEBH39QTajI45zoLvAi5v38uwbe/h1yy5e3LyXQv+Rry1VWXFj01z+3w+ewexpE1m5sZ37n2rhyfVtnDVrCp+6ZD7XLj2VbEasfGMPP
1u7g32HevnoebP4wKJGchnxDy9v455/WMuuzuLHOOpr8tzYNJffvWA2i2bUHjfkDvb0sXH3Ad7YdYB12/bx4ua9vLR5L/sOFY7YLpsRf/Dbp/NHVy6iOpcd8lgdXb1s2NXJG7sOsGl3F3PrJnH5exqpr61+27avbOngnn9cy3NvFOe8nj1tIssW1PHB9zRy1ZKZR7xR23eol1e2dHB6Qy2nTD3+33ek+vuDf3h5K3+1Yj2t7QeZUJXh7mvO4uMXzzviDW85OaBKjGVADegp9PPEqzt5bPU2GmqruXRhPcsW1NHXHzy5fie/WLuT3+zYz5JTp/DeOdM4d85Upk/KU53LUJ3LMDGfZWJVllw2w+7Obn71WhtPvtrGy617mTqxisbJ1TROrubi0+vf9oQfsLerh3//8Es8/upbE9OfMmUCvX39R4w6nDapiqZ507lofh1XLZnJ6Y217+ic9x/q5QcrN/P1f97Ilr0HgeIHmX/vwjm8f2EDP1u7nUdf3saBnj4mVmW59X2n8W8vO/1tL7o79h3i9+7/NR0Hew+/CPVH8PrOzsMvLLmMmDllArOmTmBefQ3XLj2V9y9seFtrpG1/Nw/86nW+/cwmDvX2k8uIs2ZNYW7dRJ5+fTftXb3ksxnqavLkcxmqsqI6l6WmOsvEfI5JVVkm5rNMqMoWX+wRQRBR7E5s7+qlo6uXvgjqavLU1+Tpj+DJ9W207e8mmxERwcBrdzYjLjmjnn953qlUV2UOv2g0zZvOxt0H2NXZQ3UuwwWnTee9c6fx3jlTeXlLB1/7pzfIZcXHL55HNiN2dXaz50AvkyfkaJxcTUNtnt2dPby4eS+rt3Qcbt1KUJvPMa9hEmc01nJ6Qy1BsOdAD7sP9LC6tYM39xTDcvqkqsOt8KqsyEj0JG+O8rkMMyZXMzN5/qzZuo++/iAjOHf2VC5Z2MAlZ9QzfVKe7kI/3b19PPbKNn6wstg6PqOxlle376euJs/1S2fzzy27WL9jP9MnVdHXH+w7VKA6l2FCVZaOg8U3dvPqJ/H8m3s5b85Uvnj9uezp6uG7z2ziF+t20B8wY3I1ly5s4MyZk9m69yCb9nTR2t5Fd28/vX3F28D5DDz2Z86czNK50zhvzlROb6hhXn0NE6oyfOHRdTyyqpVFM2q59X2nEUChL2jv6uHV7ftZu3Uf2/e9/Y2kBBecNp0LTpvGpHyOifksr+3o5IcvtDJ9Up5/d8VCImDlxj2s3LiHXZ091OSzfOScUzhz5mR+ub6NlRv3HA73mVOqee+caZw6bSKTJ+SSWxWTJ+SYMqEKgJadnfxmx37e2HWAupo88+prmFc/if4Itu49yLa9h+g42IskMoI393Tx6vb9LJk1hX93xUJ+0LyZp9a3cenCev70Q2dSOyFHdS5LPpchK5HJQFZHviGpzmVG9SbOAVWiEgKqUvT3B89t3MPkCTkWNNQcDrKungJb2g8iwekNtam+kyr09fOztTvoPFTgd86bRW3JE7urp8Bzb+zhvDnTDs8EP5SNuw5w709fpbO7QH8UA2FefQ3nz53G0tOmcUZj7Yiuee3q7GbT7i6WzJrCxHz2cD1XbWrnifU7aT/QQ29f0JN0r3b19NHV20dXd4FDhT4O9fZzqKRLE0FNPse0SVVMm1RFNiN2dxZf9Lt7+3j/ogY+cvYpXL54BjX5HHu7ethzoIf62uojzvtQbx/f+PVGHnruTc6ePZVrzjmFy98z420vBpt2H+CLj67jZ2t3kMuI+to80ycVW+dt+7vpLvSTz2Y469QpLJ0zlXn1NXQX+jnYU6DjYC8bd3fRsrPz8BuHqROrqK/Js6ChhssXz+DyxTOYPW0i2zsO8cKb7aze0kFfBPlshqpshgPdBXbu72bHvkNEwIXzptM0fzoXzJt++IVzKFv3HuT+p17nxc17ueHCOXysaS4T81kigqc37Ob7z20mn83w4bNn8oFFDeQyGX75mzZ+9EIrq7d08K8vXcAnfmv+EX/rHfsO8dT6nfyflt38umUXuw/0MGVCjnn1NcyZPpFJ+eLsLrmsmDF5Agsaaji9sYbTG2oP/+2H8uT6nfzFD1cf0aORzYiFjbWcNWsyi2dNYWFjLfMbaphbN5HfbO/kF+t28PirO2jZ2cmh3n6gGO6funQBf3j5QqZOfOuxGfhf/NHzW3hs9Tb2dxc4c2YtV541k2Xz69i4+wAvbd7Ly60d7OrsTp77Q9d1+qQqTm+spf1AD5vbuw63tLMZccqUCUybVEUE9EdQnctw2yXzuX7pbDLJG6bvP7eZLzy69ohu+mO5qWku995w3rC2HYoDqoQDysarrp4CE3LZI95QRAT7uwtJ6/voL8BQDMRsRlRlx8cgkv7+4EBPgcnHCMmR6O3rp+NgL7mMyGbEhKrssB+r/v7gUKEPoWMGIRT/DvsO9jLjGF22EcGBnuJ2+w8V2H+ol96+4IwZNTTWVh9u4fT1F1tOVdkMjZOrh/3GbXvHIdZu6+BQbz/dhT56Cv309UNfBP2DknHhjFouHeY14aEMN6A8is/sXWyorlxJx2zFlJpQdewXznebTEaphRNAVTZDwxDXlIZbl6H+PkMpdhkf+28hidrq3BE9EEPJZsTcuknDrueAU6ZOKMs1r9EYH2+bzMxs3HFAmZlZRXJAmZlZRXJAmZlZRSprQEm6WtJ6SS2S7hpifbWkHyTrn5U0Pym/VdKLJbd+SUuTdU8lxxxYN6Oc52BmZmOjbAElKQvcB1wDLAFukbRk0GafBtojYiHwJeBegIj4bkQsjYilwMeBjRHxYsl+tw6sj4idmJnZuFPOFtQyoCUiNkRED/AQcN2gba4DvpncfwS4Um+fq+QW4PtlrKeZmVWgcgbUbGBzyXJrUjbkNhFRADqA+kHb3MTbA+rrSffe54YINAAk3S6pWVJzW1vbOz0HMzMbI+UMqKGCY/C0FcfcRtL7gK6IeKVk/a0RcS7wgeT28aF+eUQ8EBFNEdHU2Ng4spqbmdmYK2dAtQJzS5bnAFuPto2kHDAV2FOy/mYGtZ4iYkvycz/wPYpdiWZmNs6UM6BWAoskLZCUpxg2ywdtsxy4Lbl/A/BEJJMDSsoAN1K8dkVSlpPUkNyvAj4KvIKZmY07ZZuLLyIKku4EVgBZ4MGIWCPpHqA5IpYDXwO+LamFYsvp5pJDXAa0RsSGkrJqYEUSTlngF8DflusczMxs7Hg2czMzO6GGO5u5Z5IwM7OK5IAyM7OK5IAyM7OK5IAyM7OK5IAyM7OK5IAyM7OK5IAyM7OK5IAyM7OK5IAyM7OK5IAyM7OK5IAyM7OK5IAyM7OK5IAyM7OK5IAyM7OK5IAyM7OK5IAyM7OK5IAyM7OK5IAyM7OK5IAyM7OK5IAyM7OKVNaAknS1pPWSWiTdNcT6akk/SNY/K2l+Uj5f0kFJLya3/1myz4WSVif7fEWSynkOZmY2NsoWUJKywH3ANcAS4BZJSwZt9mmgPSIWAl8C7i1Z93pELE1uf1BSfj9wO7AouV1drnMwM7OxU84W1DKgJSI2REQP8BBw3aBtrgO+mdx/
BLjyWC0iSbOAKRHxdEQE8C3g+vSrbmZmY62cATUb2Fyy3JqUDblNRBSADqA+WbdA0guSfinpAyXbtx7nmABIul1Ss6Tmtra20Z2JmZmdcOUMqKFaQjHMbbYBp0XE+cBngO9JmjLMYxYLIx6IiKaIaGpsbBxBtc3MrBKUM6Bagbkly3OArUfbRlIOmArsiYjuiNgNEBGrgNeBM5Pt5xznmGZmNg6UM6BWAoskLZCUB24Glg/aZjlwW3L/BuCJiAhJjckgCySdTnEwxIaI2Absl3Rxcq3qE8CPy3gOZmY2RnLlOnBEFCTdCawAssCDEbFG0j1Ac0QsB74GfFtSC7CHYogBXAbcI6kA9AF/EBF7knV3AN8AJgI/SW5mZjbOqDgYbnxramqK5ubmsa6GmZkBklZFRNPxtvNMEmZmVpEcUGZmVpEcUGZmVpEcUGZmVpEcUGZmVpEpbwzgAAAJa0lEQVQcUGZmVpEcUGZmVpEcUGZmVpEcUGZmVpEcUGZmVpEcUGZmVpEcUGZmVpEcUGZmVpEcUGZmVpEcUGZmVpEcUGZmVpEcUGZmVpEcUGZmVpEcUGZmVpEcUGZmVpHKGlCSrpa0XlKLpLuGWF8t6QfJ+mclzU/Kr5K0StLq5OcVJfs8lRzzxeQ2o5znYGZmYyNXrgNLygL3AVcBrcBKScsjYm3JZp8G2iNioaSbgXuBm4BdwL+MiK2SzgFWALNL9rs1IprLVXczMxt75WxBLQNaImJDRPQADwHXDdrmOuCbyf1HgCslKSJeiIitSfkaYIKk6jLW1czMKkw5A2o2sLlkuZUjW0FHbBMRBaADqB+0ze8BL0REd0nZ15Puvc9J0lC/XNLtkpolNbe1tY3mPMzMbAyUM6CGCo4YyTaSzqbY7ff/lKy/NSLOBT6Q3D4+1C+PiAcioikimhobG0dUcTMzG3vlDKhWYG7J8hxg69G2kZQDpgJ7kuU5wI+AT0TE6wM7RMSW5Od+4HsUuxLNzGycKWdArQQWSVogKQ/cDCwftM1y4Lbk/g3AExERkqYBjwJ3R8Q/D2wsKSepIblfBXwUeKWM52BmZmOkbAGVXFO6k+IIvHXAwxGxRtI9kq5NNvsaUC+pBfgMMDAU/U5gIfC5QcPJq4EVkl4GXgS2AH9brnMwM7Oxo4jBl4XGn6ampmhu9qh0M7NKIGlVRDQdbzvPJGFmZhXJAWVmZhXJAWVmZhXJAWVmZhXJAWVmZhXJAWVmZhXJAWVmZhXJAWVmZhXJAWVmZhXJAWVmZhXJAWVmZhXJAWVmZhXJAWVmZhXJAWVmZhXJAWVmZhXJAWVmZhXJAWVmZhXJAWVmZhXJAWVmZhXJAWVmZhWprAEl6WpJ6yW1SLpriPXVkn6QrH9W0vySdXcn5eslfWS4xzQzs/GhbAElKQvcB1wDLAFukbRk0GafBtojYiHwJeDeZN8lwM3A2cDVwP+QlB3mMc3MbBwoZwtqGdASERsiogd4CLhu0DbXAd9M7j8CXClJSflDEdEdEW8ALcnxhnNMMzMbB3JlPPZsYHPJcivwvqNtExEFSR1AfVL+zKB9Zyf3j3dMACTdDtyeLHZKWj/C+jcAu0a4z3jjx6DIj0ORHwc/BgNG+zjMG85G5QwoDVEWw9zmaOVDtfgGH7NYGPEA8MCxKngskpojoumd7j8e+DEo8uNQ5MfBj8GAE/U4lLOLrxWYW7I8B9h6tG0k5YCpwJ5j7DucY5qZ2ThQzoBaCSyStEBSnuKgh+WDtlkO3JbcvwF4IiIiKb85GeW3AFgEPDfMY5qZ2ThQti6+5JrSncAKIAs8GBFrJN0DNEfEcuBrwLcltVBsOd2c7LtG0sPAWqAA/GFE9AEMdcwyncI77h4cR/wYFPlxKPLj4MdgwAl5HFRssJiZmVUWzyRhZmYVyQFlZmYVyQE1yMk6lZKkuZKelLRO0hpJf5yU10n6uaTXkp/Tx7qu5ZbMWvKCpH9MlhckU3G9lkzNlR/rOpabpGmSHpH0avKc+K2T9Lnwp8n/wyuSvi9pwsnwfJD0oKSdkl4pKRvy76+irySvmS9LuiCtejigSpzkUykVgH8fEWcBFwN/mJz7XcDjEbEIeDxZHu/+GFhXsnwv8KXkMWinOEXXePfXwE8jYjHwXoqPx0n1XJA0G/gjoCkizqE4MOtmTo7nwzcoTjNX6mh//2sojrReRHFyhPvTqoQD6kgn7VRKEbEtIp5P7u+n+II0myOno/omcP3Y1PDEkDQH+BfA3yXLAq6gOBUXnByPwRTgMoqjbImInojYy0n2XEjkgInJ5zQnAds4CZ4PEfEriiOrSx3t738d8K0oegaYJmlWGvVwQB1pqOmZZh9l23ErmVX+fOBZYGZEbINiiAEzxq5mJ8SXgf8A9CfL9cDeiCgkyyfDc+J0oA34etLV+XeSajjJngsRsQX4/4E3KQZTB7CKk+/5MOBof/+yvW46oI40nOmZxjVJtcD/Av4kIvaNdX1OJEkfBXZGxKrS4iE2He/PiRxwAXB/RJwPHGCcd+cNJbnGch2wADgVqKHYnTXYeH8+HE/Z/kccUEc6qadSklRFMZy+GxE/TIp3DDTXk587x6p+J8ClwLWSNlLs3r2CYotqWtLFAyfHc6IVaI2IZ5PlRygG1sn0XAD4EPBGRLRFRC/wQ+ASTr7nw4Cj/f3L9rrpgDrSSTuVUnKt5WvAuoj47yWrSqejug348Ymu24kSEXdHxJyImE/xb/9ERNwKPElxKi4Y548BQERsBzZLek9SdCXFWV1OmudC4k3gYkmTkv+PgcfhpHo+lDja33858IlkNN/FQMdAV+BoeSaJQST9DsV3zQNTKX1xjKt0Qkh6P/BPwGreuv7yFxSvQz0MnEbxH/bGiBh88XTckfRB4M8i4qOSTqfYoqoDXgB+PyK6x7J+5SZpKcWBInlgA/Apim9oT6rngqT/DNxEcZTrC8C/oXh9ZVw/HyR9H/ggxa/V2AH8JfC/GeLvn4T331Ac9dcFfCoimlOphwPKzMwqkbv4zMysIjmgzMysIjmgzMysIjmgzMysIjmgzMysIjmgzE4QSX2SXiy5pTY7g6T5pTNPm40HZfvKdzN7m4MRsXSsK2H2buEWlNkYk7RR0r2SnktuC5PyeZIeT75j53FJpyXlMyX9SNJLye2S5FBZSX+bfH/RzyRNTLb/I0lrk+M8NEanaTZiDiizE2fioC6+m0rW7YuIZRQ/kf/lpOxvKH6NwXnAd4GvJOVfAX4ZEe+lOEfemqR8EXBfRJwN7AV+Lym/Czg/Oc4flOvkzNLmmSTMThBJnRFRO0T5RuCKiNiQTNi7PSLqJe0CZkVEb1K+LSIaJLUBc0qn10m+IuXnyZfJIemzQFVEfEHST4FOilPV/O+I6CzzqZqlwi0os8oQR7l/tG2GUjofXB9vXWP+FxS/KfpCYFXJTNxmFc0BZVYZbir5+XRy/9cUZ1UHuBX4P8n9x4E7ACRlk2/AHZKkDDA3Ip6k+EWM04C3teLMKpHfSZmdOBMlvViy/NOIGBhqXi3pWYpvGm9Jyv4IeFDSn1P8httPJeV
/DDwg6dMUW0p3UPzG16Fkge9Imkrxi+W+lHx9u1nF8zUoszGWXINqiohdY10Xs0riLj4zM6tIbkGZmVlFcgvKzMwqkgPKzMwqkgPKzMwqkgPKzMwqkgPKzMwq0v8Fn/PcJaMofrMAAAAASUVORK5CYII=\n", "text/plain": [ - "" + "
    " ] }, - "metadata": {}, + "metadata": { + "needs_background": "light" + }, "output_type": "display_data" } ], @@ -377,12 +376,14 @@ }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfcAAAFkCAYAAAA9h3LKAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAPYQAAD2EBqD+naQAAIABJREFUeJzt3XmcXvPd//HXNxE7kba0Wty920xy2x7IWIIKaioRDU1R\nnSwaJBG7qTXaUu5bqZbU0lS1ahuGUvwokoq1QYRMhUiYmRRB7Ymxk0y+vz++U5E0ItfMNXPmOtfr\n+XjMozNnruXjNDPv+ZzzXUKMEUmSlB/dsi5AkiQVl+EuSVLOGO6SJOWM4S5JUs4Y7pIk5YzhLklS\nzhjukiTljOEuSVLOGO6SJOWM4S5JUs4UHO4hhK+GEK4OIbwRQng/hDAzhNCvI4qTJEmFW6WQB4cQ\n1gMeBO4GBgJvABXAguKXJkmS2iIUsnFMCOEcYMcY464dV5IkSWqPQi/LDwEeCyH8OYTwagihPoQw\nuiMKkyRJbVNo5/4BEIHzgBuB7YELgMNijFcv5/FfJF2+fw74sAj1SpJULlYHvg5MjjG+WcgTCw33\nj4DpMcZdPnXsAmDbGOPOy3n8MOCaQgqSJElLGR5jvLaQJxQ0oA54GZizzLE5wPc/4/HPAdTW1rLp\nppsW+Fb5U1NTw4QJE7IuI3OehyU8F4nnYQnPReJ5gDlz5jBixAhozdJCFBruDwJ9lznWF3j+Mx7/\nIcCmm25Kv37OluvZs6fnAc/Dp3kuEs/DEp6LxPOwlIJvaxc6oG4C0D+EMD6E8M3Wy+6jgYsLfWNJ\nktQxCgr3GONjwFCgGngS+AlwbIzxug6oTZIktUGhl+WJMd4B3NEBtUiSpCJwbflOVF1dnXUJXYLn\nYQnPReJ5WMJzkXge2qegqXAFv3hac37GjBkzHBghSVIB6uvrqaysBKiMMdYX8lw7d0mScsZwlyQp\nZwx3SZJyxnCXJClnDHdJknLGcJckKWcMd0mScsZwlyQpZwx3SZJyxnCXJClnDHdJknLGcJckKWcM\nd0mScsZwlyQpZwx3SZJyxnCXJClnDHdJknLGcJckKWcMd0mScsZwlyQpZwx3SZJyxnCXJClnDHdJ\nknLGcJckKWcMd0mScsZwlyQpZwx3SZJyxnCXJClnDHdJknLGcJckKWcMd0mScsZwlyQpZwx3SZJy\nxnCXJClnDHdJUteycCG8+WbWVZQ0w12S1HU89hhstx2MGpV1JSXNcJckZe+99+D442GHHaBbNzjj\njKwrKmmrZF2AJKnMTZ4M48bBK6/AOedATQ2sYjy1h527JCkbb7wBBx0EgwbBN78Js2bBiSca7EVQ\nULiHEE4PISxe5mN2RxUnScqhGOGaa2DTTeGvf4XLL4e77koBr6Joy59Hs4A9gND69aLilSNJyrXn\nnkuX4CdPhupq+M1vYIMNlnpIQ0MDc+fOpXfv3lRUVGRTZ4lry2X5RTHG12OMr7V+zC96VZKkfGlp\ngQkTYPPNYfZsuP12uPbapYJ9/vz5DBq0N3379mXw4MH06dOHQYP2ZsGCBRkWXpraEu4VIYSXQghz\nQwi1IYSNi16VJCk/Zs6E/v3TaPjRo+Gpp2Dw4P942LBhI5kyZRpQC8wDapkyZRrV1SM6u+KSV2i4\nTwNGAQOBccB/Aw+EENYqcl2SpFL3wQdw6qlQWQkffggPPQQXXADrrPMfD21oaGDy5DtoabkQGA5s\nDAynpeUCJk++g8bGxs6uvqQVdM89xjj5U1/OCiFMB54HfgBc/lnPq6mpoWfPnksdq66uprq6upC3\nlySVinvvhbFjYd48+PnP4aSTYNVVP/Phc+fObf1swDLf2RWApqamXN9/r6uro66ubqljzc3NbX69\nds03iDE2hxAagN4retyECRPo169fe95KklQKFixI09kuuwwGDEij4fv2/dynffOTkfIPkDr3f7sf\ngN69VxgzJW95DW99fT2VlZVter12zXMPIaxNCvaX2/M6kqQSFyPccEOa3nbjjfD736fufSWCHaBP\nnz4MHDiY7t2PId1zfwGopXv3Yxk4cHCuu/aOUOg891+FEAaEEP4rhLATcDOwEKj7nKdKkvLqxRdh\n333hBz+AnXdOo+HHjk3LyBagrq6Wqqr+wEhgE2AkVVX9qaur7Yiqc63Qy/IbAdcCXwReB6YC/WOM\nbt8jSeVm8WK45BI45RRYe2246SYYOrTNL9erVy8mTbqdxsZGmpqanOfeDoUOqHMEnCQpdedjxqQR\n8IcdltaEX2+9orx0RUWFod5Ori0vSVp5H32URr9vvXXac/2BB1L3XqRgV3G4Or8kaeU8+GDq1hsb\nYfz4NId99dWzrkrLYecuSVqxt9+GI46Ab30L1l0X6uvhzDMN9i7Mzl2S9NluvTUFe3MzXHhh+rx7\n96yr0uewc5ck/adXXoEDDkhT3LbeOq0Hf/TRBnuJsHOXJC0RY1pd7sQT03Kx112X5q+H8PnPVZdh\n5y5JShob4dvfToPmhg6FOXPgwAMN9hJkuEtSuVu4EM4+G7bcMm30ctdd8Kc/wRe+kHVlaiMvy0tS\nOXv00SV7rB9/PJx+Oqy5ZtZVqZ3s3CWpHL37LtTUQP/+sMoqKeR/+UuDPSfs3CWp3EyaBOPGwWuv\nwbnnwrHHpoBXbti5S1K5eP11GDEC9toLKipg1qx0Kd5gzx3/H5WkvIsRamvTZfgY4corYeRIR8Hn\nmJ27JOXZs8/CwIFw0EHpf+fMSZ8b7LlmuEtSHi1aBOefD1tsAc88A3fcAddcAxtskHVl6gSGuyTl\nzeOPp1HwJ5yQFqR56ql0n11lw3CXpLz44AM45RTYdlv4+GN4+GH4zW9g7bWzrkydzAF1kpQH99wD\nY8fCiy+m7VhPPBF69Mi6KmXEzl2SStn8+XDoobDHHrDRRvDEE3DqqQZ7mbNzl6RSFCPccEPahvWj\nj+DSS1PId7Nnk527JJWeF16AffZJO7btskua3jZmjMGuT/gvQZJKRUsLXHwxbLYZ1NfDzTfDjTfC\nhhtmXZm6GMNdkkrBU0/Bt76VLsOPHAmzZ8P3vpd1VeqiDHdJ6so++ghOOw222Qbeegv+/neYOBF6\n9sy6MnVhDqiTpK5q6tR0L33uXBg/Po2CX221rKtSCbBzl6SuprkZDj88DZZbbz34xz/gjDMMdq00\nO3dJ6kpuuQWOPBLefhsuuiiFfPfuWVelEmPnLkldwb/+BfvtB0OHQr9+acDcUUcZ7GoTw12SsrR4\ncVqAZrPN0j3266+HW2+FjTfOujKVMMNdkrLyzDOw++5w2GGpa58zB37wA/daV7sZ7pLU2T7+GM46\nC7baCl56Ce6+Gy67DL7whawrU044oE6SO
tMjj6TpbbNnp/3WTz8d1lgj66qUM3buktQZ3n0XjjsO\ndtwRVl0VHnsMzjnHYFeHsHOXpI52xx1pStvrr8Ovfw3HHAOr+OtXHcfOXZI6ymuvwbBhsPfe0Lcv\nzJoFP/6xwa4O578wSSq2GOGqq1KQh5A+HzHCUfDqNHbuklRM//wn7LknjBoFe+2VpreNHGmwq1MZ\n7pJUDIsWpfvpW2wBjY1w551QWwvrr591ZSpDhrsktVd9PeywA5x8Mowbl+6tDxqUdVUqY4a7JLXV\n++/DSSfB9tvDwoXw8MNw/vmw9tpZV6Yy165wDyGcEkJYHEI4v1gFSVJJuPtu2HJLuPBC+N//hRkz\nUshLXUCbwz2EsB0wFphZvHIkqYt78004+GCoqoJNNoEnnoDx46FHj6wrkz7RpnAPIawN1AKjgbeK\nWpEkfY6GhgbuvPNOGhsbO+9NY4TrroNNN4Wbb4Y//hHuuQf69Om8GqSV1NbO/bfAbTHGe4pZjCSt\nyPz58xk0aG/69u3L4MGD6dOnD4MG7c2CBQs69o3nzYPvfheqq2HXXdP0tkMPdXqbuqyCwz2E8ENg\na2B88cuRpM82bNhIpkyZRrpwOA+oZcqUaVRXj+iYN2xpgYsuSnutz5wJt9wCN9wAG27YMe8nFUlB\nK9SFEDYCfgNUxRgXdkxJkvSfGhoamDz5DlKwD289OpyWlsjkySNpbGykoqKieG84axaMHp12cTvi\nCDj7bFh33eK9vtSBCl1+thJYH6gP4ZPrUd2BASGEo4DVYoxx2SfV1NTQs2fPpY5VV1dTXV3dhpIl\nlaO5c+e2fjZgme/sCkBTU1Nxwv3DD9Ne6+ecAxUVMHUq7Lxz+19XWoG6ujrq6uqWOtbc3Nzm1wvL\nyeLPfnAIawH/tczhK4A5wDkxxjnLPL4fMGPGjBn069evzUVKUkNDA3379mXpzp3Wr0fS0NDQ/nB/\n4AEYOzYtIXvqqWkU/Gqrte81pTaqr6+nsrISoDLGWF/Icwvq3GOM7wGzP30shPAe8OaywS5JxdSn\nTx8GDhzMlCnH0NISSR37/XTvfixVVYPbF+xvvZVWl7v00rTf+l/+AptvXqzSpU5XjBXqVr71l6R2\nqKurpaqqPzAS2AQYSVVVf+rqatv+ojffnAbM1dXBb3+bLsMb7Cpx7d7yNcb47WIUIkmfp1evXkya\ndDuNjY00NTXRu3fvtnfs//oXHHVUCvchQ2DiRNhoo+IWLGXE/dwllZyKioq2h/rixfCHP6Q14ddY\nA/78Z9h/f+esK1fcOEZS+Xj6adhtt7Rz2wEHpMVoDjjAYFfuGO6S8u/jj9PmLlttBS+/nJaN/eMf\noVevrCuTOoSX5SXl27RpaTGap59Ol+J/9rN0OV7KMTt3Sfn0zjtwzDGw004pzGfMgF/8wmBXWbBz\nl5Q/t98Ohx+etmc977wU8t27Z12V1Gns3CXlx6uvpp3bvvvdNHd91iyoqTHYVXbs3CWVvhjhiivg\n+OOhWze4+moYPtxR8Cpbdu6SStvcufCd78Ahh8Dee6fpbSNGGOwqa4a7pNK0aBGcey5suWUK+EmT\nUse+/vpZVyZlznCXVHrq62H77dOubYcfnu6tDxyYdVVSl2G4Syod778PJ54I220HLS1pDvt558Fa\na2VdmdSlOKBOUmm46y447LC0wtxZZ6XBcz16ZF2V1CXZuUvq2t58E370I9hzT/j61+GJJ+CUUwx2\naQXs3CV1TTGmPdaPOw4WLoTLLoODD3YUvLQS7NwldT3PP5+mtQ0fDrvvnqa3HXKIwS6tJMNdUtfR\n0gIXXACbbw5PPgm33grXXw9f+UrWlUklxXCX1DU88UTa5KWmBkaNgqeegiFDsq5KKkmGu6Rsffgh\n/OQnUFkJ774LU6fCxRfDuutmXZlUshxQJyk7998PY8fCc8+lfdZPPhlWWy3rqqSSZ+cuqfO99VYK\n9d12S8vFPv44nHaawS4ViZ27pM4TI9x0Exx1FLz3HkycmBam6WafIRWTP1GSOsdLL8HQobD//rDD\nDjB7dloX3mCXis6fKkkda/Fi+N3vYLPN4JFH4MYb4eabYaONsq5Myi3DXVLHmTMHBgyAI46AAw9M\n3fp++7kYjdTBDHdJxffxx3DmmbD11vDaa3DffXDppdCrV9aVSWXBAXWSiuvhh2HMGHjmGTjppDTF\nbfXVs65KKit27pKK4+230yj4nXeGNdeEGTPS1qwGu9Tp7Nwltd9tt6X76gsWwPnnw9FHQ/fuWVcl\nlS07d0lt98oraaDcPvvAllum9eCPO85glzJm5y6pcDHC5ZfD8cfDKqvANddAdbWj4KUuws5dUmGa\nmmCPPeDQQ1PHPmcODBtmsEtdiOEuaeUsXAjnnJMuvz/3HPztb3DllfClL2VdmaRlGO6SPt9jj8F2\n26WtWY86Cp58Er7znayrkvQZDHdJn+2999J99R12SJfdp0+HX/0K1lor68okrYAD6iQt3+TJMG5c\nGhF/9tlQUwM9emRdlaSVYOcuaWlvvAEjR8KgQfCNb6RL8CedZLBLJcTOXVISI1x7bZqn3tICf/oT\njBrlKHipBNm5S0qj3/faC0aMgKqqNL3t4IMNdqlEGe5SOWtpgQkTYPPN03asf/0r1NXBl7+cdWWS\n2sFwl8rVzJnQv38aDX/ooWnp2L33zroqSUVQULiHEMaFEGaGEJpbPx4KIQzqqOIkdYAPPoDx46Gy\nMn3+0ENw4YWwzjpZVyapSAodUPcCcDLQCARgFPD/QghbxxjnFLk2ScV2770wdizMmwc//3kaBb/q\nqst9aENDA3PnzqV3795UVFR0bp2S2qWgzj3GeHuMcVKMcW6MsSnG+FPgXaB/x5QnqSgWLIDRo+Hb\n34YNN0yX5H/60+UG+/z58xk0aG/69u3L4MGD6dOnD4MG7c2CBQsyKFxSW7T5nnsIoVsI4YfAmsDD\nxStJUtHECDfcAJtumv73kkvgvvvgf/7nM58ybNhIpkyZBtQC84BapkyZRnX1iE4qWlJ7FTzPPYSw\nBSnMVwfeAYbGGJ8udmGS2unFF+GII+C222DoULjoIvja11b4lIaGBiZPvoMU7MNbjw6npSUyefJI\nGhsbvUQvlYC2LGLzNLAV0BPYH7gqhDBgRQFfU1NDz549lzpWXV1NdXV1G95e0gotXpw69FNOgbXX\nhr/8Bb7//ZV66ty5c1s/G7DMd3YFoKmpyXCXOkBdXR11dXVLHWtubm7z64UYY7sKCiHcBTTFGA9f\nzvf6ATNmzJhBv3792vU+klbC7NkwZkwaAX/YYWmL1vXWW+mnNzQ00LdvX5bu3Gn9eiQNDQ2Gu9RJ\n6uvrqaysBKiMMdYX8txizHPvBqxWhNeR1FYffZRGv2+9dVob/v77U/deQLAD9OnTh4EDB9O9+zGk\nQH8BqKV792MZOHCwwS6ViELnuf8ihLBLCOG/QghbhBDOJl2vq+2Y8iR9rgcfhG22gbPOgpNPTiPh\n
Byx7WX3l1dXVUlXVHxgJbAKMpKqqP3V1/phLpaLQe+4bAFcCGwLNwBPAnjHGe4pdmKTP8fbb6b76\n736X9luvr4ctt2z3y/bq1YtJk26nsbGRpqYm57lLJaigcI8xju6oQiQV4NZb00j4t96CCy6AI4+E\n7t2L+hYVFRWGulSiXFteKiWvvAIHHAD77gtbbZUG0B1zTNGDXVJpcz93qRTECJddBieeCD16pJ3b\nDjzQLVklLZedu9TVNTamZWPHjIHvfS/ttf7DHxrskj6T4S51VQsXwtlnp0Fy8+bBXXfB5ZfDF7+Y\ndWWSujgvy0td0aOPpo1ennoKfvzjNId9zTWzrkpSibBzl7qSd9+Fmhro3z8Nkps+Hc4912CXVBA7\nd6mrmDQJxo2D115Ly8bW1MAq/ohKKpydu5S111+HESNgr72gogKefDKNijfYJbWRvz2krMQItbWp\nQ48RrrgCDjrIUfCS2s3OXcrCs8/CwIEpzPfcM01v+9GPDHZJRWG4S51p0SI4/3zYYgt4+mm4/Xa4\n9lrYYIOsK5OUI4a71FkefzyNgj/hhCXT3AYPzroqSTlkuEsd7YMP0u5t226b9l1/+OG02cs662Rd\nmaScckCd1JHuuQfGjoUXX4Qzzkij4FddNeuqJOWcnbvUEebPh0MPhT32gK99DWbOhJ/8xGCX1Cns\n3KViihFuuAGOPjpdgr/00hTy3fw7WlLn8TeOVCwvvAD77JO2Yv3Wt9Je62PGGOySOp2/daT2ammB\niy+GzTaDGTPgppvgL3+Br34168oklSnDXWqPp56CXXZJl+FHjEiL0QwdmnVVksqc4S61xUcfwemn\nwzbbwIIF8MAD8LvfQc+eWVcmSQ6okwo2dWq6lz53bpq/fuqpsPrqWVclSZ+wc5dWVnMzHH54ugy/\n3npQXw9nnmmwS+py7NyllXHLLXDkkfD223DRRSnku3fPuipJWi47d2lFXn4Z9t8/DZLr1y9Nbzvq\nKINdUpdmuEvLs3gx/OEPsOmm8Pe/w3XXwa23wsYbZ12ZJH0uw11a1jPPwO67pzXhv//9NL3twAPd\na11SyTDcpX/7+GM46yzYait46SWYMgX+9Cf4wheyrkySCuKAOgngkUfS9LbZs9N+66edBmuumXVV\nktQmdu4qb+++C8cdBzvumHZse/RROOccg11SSbNzV/m6804YNw5efx1+9Ss49lhYxR8JSaXPzl3l\n57XXYPhwGDwY+vaFWbPg+OMNdkm54W8zlY8Y4aqr4Mc/Tl9feSWMHOkoeEm5Y+eu8vDPf8Kee8Ko\nUbDXXvD003DQQQa7pFwy3JVvixbBr38NW2wBjY1wxx1QWwvrr591ZZLUYQx35dc//gE77AAnnQSH\nHZbure+1V9ZVSVKHM9yVP++/DyefDNttBwsXwrRpMGECrL121pVJUqdwQJ3y5e6707KxL72UtmM9\n8UTo0SPrqiSpU9m5Kx/mz4eDD4aqqrS5yxNPwKmnGuySypKdu0pbjHD99WkBmo8+Sju5HXIIdPPv\nVknly9+AKl3z5sGQIVBdDQMGpN3bRo822CWVPX8LqvS0tMBFF8Hmm6cR8bfcAjfcABtumHVlktQl\nFBTuIYTxIYTpIYS3QwivhhBuDiH06ajipP8waxbsvDMcc0xaXW72bNh336yrkqQupdDOfRfgImAH\noAroAfwthLBGsQtT+WloaODOO++ksbHxP7/54YdpG9ZttoHmZvj732HiROjZs/MLlaQurqABdTHG\nwZ/+OoQwCngNqASmFq8slZP58+czbNhIJk++45NjAwcOpq6ull69eqUgHzMmLSH7k5/A+PGw2moZ\nVixJXVt777mvB0RgfhFqUZkaNmwkU6ZMA2qBeUAtU6ZM49D9D0xbsg4YAF/4Qrq//vOfG+yS9Dna\nPBUuhBCA3wBTY4yzi1eSyklDQ0Nrx14LDG89OpwhLY9y8T0XsPiRteh28cVw+OGOgpekldSeee4T\ngc2AnT/vgTU1NfRc5t5odXU11dXV7Xh75cHcuXNbPxsAwIb8i4s4mv24iVuBdSZOZPeDDsqsPknq\nDHV1ddTV1S11rLm5uc2vF2KMhT8phIuBIcAuMcZ5K3hcP2DGjBkz6NevX5uLVH41NDTQt29fAlcx\nmg84l5P4kNU5mgO4kYtpaGigoqIi6zIlqdPV19dTWVkJUBljrC/kuQV37q3Bvi+w64qCXVoZffr0\n4dBvDeBHUw9hFxZxGQdyArvzTvdTGVg12GCXpDYoKNxDCBOBamAf4L0Qwpdbv9UcY/yw2MUp5z7+\nGM49lz9Mn8bLa6zKtz9YxL1cD1zPwKo0Wl6SVLhCO/dxpNHx9y1z/GDgqmIUpDIxbVqa3jZnDuHE\nE/nqaafx+xdfpKmpid69e9uxS1I7FDrP3eHKap933klz1S++GCor4bHHYOutAaioqDDUJakI3BVO\nnef229OUtjffhPPOg6OPhlX8JyhJxWYnro732mtp57bvfhc22yytD19TY7BLUgfxt6s6Toxw5ZVw\n/PEQAlx9NQwfnj6XJHUYO3d1jLlz4TvfgYMPhsGD017rI0YY7JLUCQx3FdeiRXDuubDlltDUBJMm\npY59/fWzrkySyobhruKpr4ftt0+7to0bl+6tDxyYdVWSVHYMd7Xf++/DiSemYF+8OM1hP/98WHvt\nrCuTpLLkgDq1z5QpcNhh8NJL8H//lwbP9eiRdVWSVNbs3NU2b74Jo0alQXObbAJPPgmnnGKwS1IX\nYOeuwsQIdXVw3HGwcCH88Y9wyCGOgpekLsTOXSvv+edh773TXPXddkvT2w491GCXpC7GcNfna2mB\nCy6AzTdPl99vvRX+/Gf4yleyrkyStByGu1bsySdhp53ScrGjRsFTT8GQIVlXJUlaAcNdy/fhh/DT\nn0K/fmknt6lT005u666bdWWSpM/hgDr9p/vvh7Fj4dlnU8CfcgqstlrWVUmSVpKdu5Z4660U6rvt\nBl/6Ejz+OJx+usEuSSXGzl3JTTfBkUfCe+/BxIlpYZpu/u0nSaXI397l7qWXYOhQ2G8/2GEHmD0b\nDj/cYJekEuZv8HK1eDFccglstllaC/7GG+Hmm2GjjbKuTJLUToZ7OZozB3bdNXXoBx6YuvX99nMx\nGknKCcO9nHz8MZx5Jmy9Nbz6Ktx7L1x6KfTqlXVlkqQickBduXj4YRgzBp55Bk46KU1xW2ONrKuS\nJHUAO/e8e+cdOPpo2HlnWHNNmDEDzjrLYJekHLNzz7O//jXdV1+wAM4/P4V89+5ZVyVJ6mB27nn0\n6qtpoNyQIbDllmk9+OOOM9glqUzYuedJjHD55XDCCSnIr7kGqqsdBS9JZcbOPS+amqCqKu2vPmRI\nmu42bJjBLkllyHAvdQsXwi9/mS6/P/ssTJ4MV16Z1oaXJJUlL8uXshkzYPRoeOKJtN/6GWfAWmtl\nXZUkKWN27qXovffg+ONh++3T19Onw69/bbBLkgA799Lzt
7+lHdteeQXOPjt17D16ZF2VJKkLsXMv\nFW+8AQcdBAMHwje+AU8+mVaaM9glScuwc+/qYoRrr03z1Fta4E9/glGjHAUvSfpMdu5d2XPPweDB\nMGIE7LFHmt528MEGuyRphQz3rqilBSZMgM03h1mz4Lbb4Lrr4MtfzroySVIJMNy7mpkzYccd02j4\nQw9Ne61/97tZVyVJKiGGe1fxwQdw6qmw7bbw/vvw0ENw4YWwzjpZVyZJKjEOqOsK7rsPxo6F55+H\n006Dk0+GVVfNuipJUomyc8/SggUwZgzsvnu6nz5zJvzsZwa7JKld7NyzECP85S9w1FHpcvwll6SQ\n7+bfWpKk9is4TUIIu4QQbg0hvBRCWBxC2KcjCsutF1+E730PDjgAdtopDZg77DCDXZJUNG1JlLWA\nx4EjgFjccnJs8WKYOBE22wwefTR17jfdBF/7WtaVSZJypuDL8jHGScAkgBBcTWWltLTw/k47seb0\n6TQfeCA9L7kE1lsv66okSTnlteAONn/+fAbtvQ/jp09nALDe9dcz6IfDWbBgQdalSZJyynDvYMOG\njWTKlGlcSC1/Zx5Qy5Qp06iuHpF1aZKknHK0fAdqaGhg8uQ7gFpgeOvR4bS0RCZPHkljYyMVFRUZ\nVihJyqNOCfeamhp69uy51LHq6mqqq6s74+0zM3fu3NbPBizznV0BaGpqMtwlSdTV1VFXV7fUsebm\n5ja/XqeE+4QJE+jXr19nvFWX8s1vfrP1swdY0rkD3A9A7969O7skSVIXtLyGt76+nsrKyja9XsHh\nHkJYC+jT2Z/4AAAHO0lEQVQN/Huk/DdCCFsB82OML7Spipzq06cPAwcOZsqUY2hpiaSO/X66dz+W\nqqrBdu2SpA7RlgF12wL/AGaQ5rmfB9QDZxSxrtyoq6ulqqo/MBLYBBhJVVV/6upqM65MkpRXbZnn\nfj+Osl9pvXr1YtKk22lsbKSpqYnevXvbsUuSOpSj5TtJRUWFoS5J6hR24JIk5YzhLklSzhjukiTl\njOEuSVLOGO6SJOWM4S5JUs4Y7pIk5YzhLklSzhjukiTljOEuSVLOGO6SJOWM4S5JUs4Y7pIk5Yzh\nLklSzhjukiTljOEuSVLOGO6SJOWM4S5JUs4Y7pIk5YzhLklSzhjukiTljOEuSVLOGO6SJOWM4S5J\nUs4Y7pIk5YzhLklSzhjukiTljOEuSVLOGO6SJOWM4S5JUs4Y7pIk5YzhLklSzhjukiTljOEuSVLO\nGO6SJOWM4S5JUs4Y7pIk5YzhLklSzhjukiTljOHeierq6rIuoUvwPCzhuUg8D0t4LhLPQ/u0KdxD\nCEeGEJ4NIXwQQpgWQtiu2IXlkf9YE8/DEp6LxPOwhOci8Ty0T8HhHkI4EDgPOB3YBpgJTA4hfKnI\ntUmSpDZoS+deA/w+xnhVjPFpYBzwPnBIUSuTJEltUlC4hxB6AJXA3f8+FmOMwBRgx+KWJkmS2mKV\nAh//JaA78Ooyx18F+i7n8asDzJkzp/DKcqi5uZn6+vqsy8ic52EJz0XieVjCc5F4HpbKztULfW5I\njfdKPjiEDYGXgB1jjI986vgvgQExxh2Xefww4JpCi5IkSZ8YHmO8tpAnFNq5vwG0AF9e5viXgVeW\n8/jJwHDgOeDDAt9LkqRytjrwdVKWFqSgzh0ghDANeCTGeGzr1wGYB1wYY/xVoQVIkqTiKrRzBzgf\nuCKEMAOYTho9vyZwRRHrkiRJbVRwuMcY/9w6p/1M0uX4x4GBMcbXi12cJEkqXMGX5SVJUtfm2vKS\nJOWM4S5JUs50aLi7wQyEEHYJIdwaQngphLA4hLBP1jVlIYQwPoQwPYTwdgjh1RDCzSGEPlnX1dlC\nCONCCDNDCM2tHw+FEAZlXVfWQgintP58nJ91LZ0thHB663/7pz9mZ11XVkIIXw0hXB1CeCOE8H7r\nz0u/rOvqTK25uey/icUhhItW9jU6LNzdYOYTa5EGHR4BlPMAh12Ai4AdgCqgB/C3EMIamVbV+V4A\nTgb6kZZyvgf4fyGETTOtKkOtf/SPJf2OKFezSAOUv9L68a1sy8lGCGE94EHgI2AgsClwPLAgy7oy\nsC1L/i18BfgOKT/+vLIv0GED6j5jPvwLpPnw53bIm3ZxIYTFwPdijLdmXUvWWv/Ie420suHUrOvJ\nUgjhTeCEGOPlWdfS2UIIawMzgMOBnwH/iDH+ONuqOlcI4XRg3xhjWXWnyxNCOIe0AuquWdfSlYQQ\nfgMMjjGu9NXODunc3WBGK2E90l+i87MuJCshhG4hhB+S1ol4OOt6MvJb4LYY4z1ZF5KxitZbd3ND\nCLUhhI2zLigjQ4DHQgh/br19Vx9CGJ11UVlqzdPhwGWFPK+jLsuvaIOZr3TQe6pEtF7F+Q0wNcZY\ndvcWQwhbhBDeIV16nAgMbd0+uay0/mGzNTA+61oyNg0YRboMPQ74b+CBEMJaWRaVkW+QruI8A+wJ\n/A64MIQwMtOqsjUU6AlcWciT2rJCndReE4HNgJ2zLiQjTwNbkX5g9weuCiEMKKeADyFsRPoDryrG\nuDDrerIUY/z0uuGzQgjTgeeBHwDldqumGzA9xviz1q9nhhC2IP3Rc3V2ZWXqEODOGOPy9m/5TB3V\nuRe6wYzKRAjhYmAwsFuM8eWs68lCjHFRjPGfMcZ/xBh/QhpIdmzWdXWySmB9oD6EsDCEsBDYFTg2\nhPBx69WdshRjbAYagN5Z15KBl4Fl9wifA2ySQS2ZCyFsQhqA/IdCn9sh4d76l/gMYI9/H2v9Yd0D\neKgj3lNdX2uw7wvsHmOcl3U9XUg3YLWsi+hkU4AtSZflt2r9eAyoBbaKZbx0Zusgw96koCs3DwJ9\nlznWl3QloxwdQrqdfUehT+zIy/JuMAO03jfrDfy7E/lGCGErYH6M8YXsKutcIYSJQDWwD/BeCOHf\nV3WaY4xlsx1wCOEXwJ2knRTXIQ2U2ZV0f7FsxBjfA5YabxFCeA94M8a4bOeWayGEXwG3kQLsa8AZ\nwEKgLsu6MjIBeDCEMJ407WsHYDQwJtOqMtDaEI8CrogxLi70+R0W7m4w84ltgXtJI8Mjae4/pMER\nh2RVVAbGkf7771vm+MHAVZ1eTXY2IP1/vyHQDDwB7OlocaB814HYCLgW+CLwOjAV6B9jfDPTqjIQ\nY3wshDAUOIc0NfJZ4NgY43XZVpaJKmBj2jjuwo1jJEnKGdeWlyQpZwx3SZJyxnCXJClnDHdJknLG\ncJckKWcMd0mScsZwlyQpZwx3SZJyxnCXJClnDHdJknLGcJckKWf+P1M+eDq5SfFoAAAAAElFTkSu\nQmCC\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAW4AAAD8CAYAAABXe05zAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4xLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvDW2N/gAAGz1JREFUeJzt3Xl01fWZx/H3A9oFXFCh7iRapzUsypKyKhVBq9a2jlMcOmE8ehyRRSuH1tEW65Sx0CpaUUEggIISVxRRlK0sVWRNWGQfRQmKKODCYhAhfOePJ45DZbkh93d/d/m8zslJ7o+be5/rOX54+P6+i4UQEBGRzFEr7gJERKR6FNwiIhlGwS0ikmEU3CIiGUbBLSKSYRTcIiIZRsEtIpJhFNwiIhlGwS0ikmGOiuJF69evH/Lz86N4aRGRrFRWVrY1hNAgkedGEtz5+fmUlpZG8dIiIlnJzMoTfa6GSkREMoyCW0Qkwyi4RUQyjIJbRCTDKLhFRDKMgltEJMMouEVEMoyCW0SkprZvh9/8BjZtSsnbKbhFRGpi0iRo3BgeeACmT0/JWyq4RUSOxEcfQdeu8LOfQb16MG8eXHttSt5awS0iUh0hwJgxUFAAEybA3XdDWRm0bp2yEiLZq0REJCutWwc33QQzZsAFF8DIkXDuuSkvI6HgNrP1wA6gEtgbQiiMsigRkbSydy8MHgx33QVHHQXDhkH37lArnkGL6rxrxxBCM4W2iOSUJUt8GOS22+CSS2D1aujRY7/QLimB/Hy/lJ/vj6OkMW4RkQPZtQvuuAN+9CPYuBGeew5efBFOP32/p5WUePNdXu7D3+Xl/jjK8E40uAMwzczKzKx7dOWIiKSBWbPgvPPgnnvguuu8y/7lL8HsG0/t1w8qKva/VlHh16OSaHC3DyG0AC4HeptZh398gpl1N7NSMyvdsmVLUosUEUmJTz+F//gPuPhib59nzIBRo+CEEw76Kxs2VO96MiQU3CGED6q+bwYmAK0O8JziEEJhCKGwQYOETt8REUkPIcD48T7Fb8wYuP12WL7cA/wwGjas3vVkOGxwm1ldMzv2q5+BS4EV0ZUkIpJCGzfCP/8zdOni49eLFsFf/gLf/W5Cvz5gANSps/+1OnX8elQS6bhPBuaY2TJgIfBKCGFKdCWJiKTAvn0wfDg0agTTpsGgQbBgATRvXq2XKSqC4mLIy/Mh8Lw8f1xUFFHdgIUQkv6ihYWFQYcFi0jaWrsWbrwRXn8dOnWCESPg+9+PtSQzK0t0urWmA4pI7vjySx/DOO88WLECHnvMN4aKObSrS0veRSQ3LFjgM0ZWrIB//Vd48EE4+eS4qzoi6rhFJLvt3Al9+kDbtvDZZ/DSS/D00xkb2qCOW0Sy2ZQpvjy9vBx694aBA+G44+KuqsbUcYtI9tm6Fbp1g8sv97l5c+bAkCFZEdqg4BaRbBKCbxJSUADPPuu7+S1ZAu3bx11ZUmmoRESyQ3m5D4tMmQJt2vhe2U2axF1VJNRxi0hmq6z0GSKNG/u87Ice8qGRLA1tUMctIpls+XKf4rdwIVxxhR9wEOUmIWlCHbeIZJ4vvoA//AFatIB334Unn/TT1nMgtEEdt4hkmtdf9+Xqa9f6qep//SucdFLcVaWUOm4RyQzbtkHPntChA+zeDVOnwtixORfaoOAWkUwwcaLv4ldcDH37+rL1Sy+Nu6rYKLhFJH19+KHvk33VVVC/PsyfD/ffD3Xrxl1ZrBTcIpJ+QoDRo30hzcsv+1L10lI/uFd0c1JE0szbb/sx6bNmwY9/7MMjP/hB3FWlFXXcIpIe9uzxU9WbNoXFiz2wZ85UaB+AOm4RiV9ZmS+kWboUrr4aHn4YTjst7qrSljpuEYlPRQXcdhu0agUffQTPP+9fCu1DUsctIvH429/gppvgnXd8TPuee6BevbirygjquEUktT75BK6/Hi65BGrXhtmz/bBehXbCFNwikhohwDPP+BS/cePg97+HZct85ohUi4ZKRCR6770HvXr5RlCFhTBtGpx/ftxVZSx13CISnX37YOhQX64+c6avepw3T6FdQ+q4RSQaq1b5Ln5z5/q+IsOHw1lnxV1VVlDHLSLJtXs39O8PzZvDmjXw+ON+nJhCO2nUcYtI8syb5wtpVq2Cf/s3eOAB+N734q4q66jjFpGa27EDfv1rP019xw545RU/bV2hHQkFt4jUzCuv+EG9Q4bAzTfDypV+/qNERkMlInJkNm+GPn3gqad81sgbb0DbtnFXlRPUcYtI9YTgNxwLCnxfkf79YckShXYKqeMWkcS9+67vLzJ9OrRrByNHerctKaWOW0QOb+9eP029SRM/PmzoUD9tXaEdC3XcInJoy5b5FL/SUrjySnjkETjzzLirymnquEXkwHbt8o2gWraEDRt8g6iXXlJopwF13CLyTX//uy9Xf+st34L1vvvgxBPjrkqqqOMWka999pkfanDRRVBZ6TchH31UoZ1mFNwi4l54waf4jR7tx4ktXw6dO8ddlRyAhkpEct0HH/iKxwkToFkz3zO7Zcu4q5JDSLjjNrPaZrbEzCZFWZBILispgfx8qFXLv5eURPhm+/ZBcbF32ZMn+5mPCxcqtDNAdTruW4HVwHER1SKS00pKfHi5osIfl5f7Y4CioiS/2dq1/uKvvQYdO3qAn3NOkt9EopJQx21mZwA/BUZFW45I7urX7+vQ/kpFhV9Pmj17YOBAP4HmzTd9PHvGDIV2hkm04x4M/Cdw7MGeYGbdge4ADRs2rHllIjlmw4bqXa+2RYvghhv8pmOXLvDQQ3DKKUl6cUmlw3bcZnYlsDmEUHao54UQikMIhSGEwgYNGiStQJFccbB+p8Z90OefQ9++0KYNfPwxvPgiPPusQjuDJTJU0h74uZmtB54GLjazcZFWJZKDBgyAOnX2v1anjl8/YlOn+v4iDzzgm0OtWgW/+EWN6pT4HTa4Qwi/CyGcEULIB7oCM0MI3SKvTCTHFBX5PcK8PDDz78XFR3hjcutWuPZauOwy+Pa3/SbkI4/A8ccnvW5JPc3jFkkjRUU1nEESgh9scOutvgryzjv97uZ3vpO0GiV+1QruEMJsYHYklYhIzZSXQ8+ePie7dWvfK7tp07irkghoybtIpqus9BkijRv7kMjgwX6MmEI7a2moRCSTrVjhe2UvWODj2cOG+ZJLyWrquEUy0e7dcNdd0KIFvP02jBsHr76q0M4R6rhFMs0bb3iXvWYNdOvmR4pp7UROUcctkim2b4deveCCC/x0msmT4YknFNo5SMEtkgleeskP5h0+HPr08bHtyy6LuyqJiYJbJJ19+CFcc42vdjzxRJg3z1dBHnNM3JVJjBTcIukoBD8yrKAAJk6EP/3JT1lv3TruyiQN6OakSLpZt873FZkxAy680Ne9n3tu3FVJGlHHLZIu9u6FQYN8U6hFi3w8e/ZshbZ8gzpukXSweLFP8VuyxMezhw6F00+PuypJU+q4ReJUUQG33w6tWsGmTTB+vB/aq9CWQ1DHLRKXmTP93Md167zbvvdeOO
GEuKuSDKCOWyTVPv3UjxDr1Mk33p4503fyU2hLghTcIqkSAjz3nE/xGzsW7rjDD+zt2DHuyiTDaKhEJBXefx969/YVkC1bwpQp0KxZ3FVJhlLHLRKlfft8q9VGjWD6dLjvPpg/X6EtNaKOWyQqq1fDjTf6bn6dO8OIEXD22XFXJVlAHbdIsn35Jdx9t3fVq1bBmDEwbZpCW5JGHbdIMs2f71P7Vq6Erl39GLGTT467Ksky6rhFkmHnTj9ZvV072LYNXn7ZT1tXaEsE1HGL1NTkydCjB7z3ns8cGTgQjj027qoki6njFjlSW7ZAURFccQXUrQtz5sDDDyu0JXIKbpHqCsGPDCso8AU1f/yjbw7Vrl3clUmO0FCJSHWsX+97ZU+bBm3b+lL1xo3jrkpyjDpukURUVvqRYY0bw9y5MGSID40otCUG6rhFDufNN32K36JF8NOf+krIM8+MuyrJYeq4RQ7miy+gXz/fW2T9enj6aZ/mp9CWmKnjFjmQv//d98r+n/+B667zPUZOOinuqkQAddwi+9u2zW8+XnQR7NnjNyEfe0yhLWlFwS3ylQkTfIrfqFHw29/C8uVwySVxVyXyDRoqEdm0CW6+GV54Ac4/38exW7aMuyqRg1LHLbkrBO+uCwrg1VfhL3/xmSMKbUlz6rglN731lt98nD3bx7OLi+Gf/inuqkQSoo5bcsuePd5ZN23qy9RHjfLDehXakkHUcUvuKC31hTTLlsG//ItvCHXqqXFXJVJt6rgl+33+uc8Sad3ad/SbMAHGj1doS8ZSxy3Zbfp0n5f97rv+/Z574Pjj465KpEYO23Gb2XfMbKGZLTOzlWbWPxWFidTIxx/7isdLL4Wjj/aVkMOHK7QlKyQyVLIbuDiEcD7QDLjMzNpEW5bIEQrB9xQpKICSErjzTh/T7tDhgE8vKYH8fKhVy7+XlKS0WpEjctihkhBCAHZWPTy66itEWZTIEdmwAXr1gldegVatYMYMnz1yECUlPiOwosIfl5f7Y/CDbUTSVUI3J82stpktBTYD00MIC6ItS6QaKit9f+zGjWHWLN83e+7cQ4Y2+MZ/X4X2Vyoq/LpIOksouEMIlSGEZsAZQCsza/KPzzGz7mZWamalW7ZsSXadIge2ciVceCHccgu0b++P+/SB2rUP+6sbNlTvuki6qNZ0wBDCZ8Bs4LID/FlxCKEwhFDYoEGDJJUnchC7d/tZj82b+9arTzzhp63n5yf8Eg0bVu+6SLpIZFZJAzOrV/Xzd4HOwJqoCxM5qLlzPbD794drroHVq6FbNzCr1ssMGAB16ux/rU4dvy6SzhLpuE8FZpnZm8AifIx7UrRliRzA9u2+i98FF/iimldfhXHj4Aj/hVdU5FuU5OV55ufl+WPdmJR0Zz5pJLkKCwtDaWlp0l9XctikSdCzJ2zcCL/+NfzpT3DMMXFXJZI0ZlYWQihM5Lla8i7p7aOPoGtX+NnPoF49mDcPBg9WaEtOU3BLegoBxozxhTQTJsDdd0NZme83IpLjtFeJpJ9163xfkRkzfDx75Eg499y4qxJJG+q4JX3s3eunqTdtCgsXwrBhvseIQltkP+q4JT0sWeJ7ZS9eDD//OQwdCmecEXdVImlJHbfEa9cuuOMO+NGPfMbIc8/Biy8qtEUOQR23xGfWLN/V6e234YYbYNAgOOGEuKsSSXvquCX1Pv3Uh0Uuvthnj8yY4Wc/KrRFEqLgltQJwY8MKyjwqX633w7Ll3uAi0jCNFQiqbFxI/TuDRMnQosWviFU8+ZxVyWSkdRxS7T27fMjwxo1gmnTfBx7wQKFtkgNqOOW6KxZAzfeCHPmQKdOMGIEfP/7cVclkvHUcUvyffmlbwJ1/vl+sMGjj/pp6wptkaRQxy3JtWCBzxhZscL3yn7wQTjllLirEskq6rglOXbu9CPD2rb16X4TJ8Izzyi0RSKgjltqbsoU6NHDj0nv1Qv+/Gc47ri4qxLJWuq45cht3epHhl1+uZ/5NWeO7zGi0BaJlIJbqi8EKCnxhTTPPgt33eWbRLVvH3dlIjlBQyVSPevX+7DI1KnQpo3vld2kSdxVieQUddySmMpKPzKscWMfEnnoIf+u0BZJOXXccnjLl/sUv4UL4Yor/ICDhg3jrkokZ6njloP74gu4807fW+Sdd+DJJ/20dYW2SKzUccuBvf66L1dfuxauvRbuvx/q14+7KhFBHbf8o23boGdP6NABdu/2m5Bjxyq0RdKIglu+NnGi7+JXXAx9+/qy9UsvjbsqEfkHCm6BTZugSxe46irvrOfP96GRunXjrkxEDkDBnctCgNGjvct++WUYOBBKS/3gXhFJW7o5maveegtuuskP7O3QwRfS/OAHcVclIglQx51r9uyBe+6B886DxYt9PHvWLIW2SAZRx51Lysp8Ic3SpXD11fDww3DaaXFXJSLVpI47F1RUwG23QatW8NFH8Pzz/qXQFslI6riz3d/+5mPZ77wD3bv7MEm9enFXJSI1oI47W338MVx/PVxyCdSuDbNn+2G9Cm2RjKfgzjYh+JFhjRrBuHHw+9/DsmXw4x/HXZmIJImGSrLJe+/50WGTJkFhIUyb5ieti0hWUcedDfbt8yPDGjWCmTN91eO8eQptkSyljjvTrVrlu/jNnevj2SNGwFlnxV2ViERIHXem2r0b+veHZs1gzRrfwW/qVIW2SA44bMdtZmcCjwOnAPuA4hDCg1EXJocwb54vpFm1Cn71Kz9S7Hvfi7sqEUmRRDruvcBvQggFQBugt5k1iras3FZSAvn5UKuWfy8pqfqDHTvgllv8NPUdO/wm5JNPKrRFcsxhO+4QwiZgU9XPO8xsNXA6sCri2nJSSYmvk6mo8Mfl5f74tCWv0PGZHrBxI9x8MwwYAMceG2+xIhKLao1xm1k+0BxYEEUxAv36fR3aAA3YzKiKX9Hx/ivhuOPgjTf8hHWFtkjOSji4zewY4HmgTwhh+wH+vLuZlZpZ6ZYtW5JZY07ZsOGrnwLXMpbVFHA1L/Bf9IclS6Bt2zjLE5E0kFBwm9nReGiXhBBeONBzQgjFIYTCEEJhgwYNklljTmnYEM7iHabyE8ZyHWs4l+YsYWzeXfCtb8VdnoikgcMGt5kZMBpYHUL4a/Ql5bC9e3m+/f2soAltmE9vhnAhr1NepxEDBsRdnIiki0Q67vbAvwMXm9nSqq8rIq4r9yxdCm3a0PLJ3/JJ80785PSVDLPeNMyrRXExFBXFXaCIpItEZpXMASwFteSmXbvgv/8bBg2Ck06CZ57hjC5dmGf6Ty4iB6Yl73GaPdvn+r31lm/Bet99cOKJcVclImlOS97j8Nlnvr9Ix45QWQnTp8Ojjyq0RSQhCu5Ue+EFKCjwoL7tNli+HDp3jrsqEckgGipJlQ8+8BWPEyb4xlCTJkHLlnFXJSIZSB131Pbtg+Ji77InT/YzHxcuVGiLyBFTxx2ltWv95uNrr/l4dnExnHNO3FWJSIZTxx2FPXt8E6jzz4c334TRo2HGDIW2iCSFOu5kW7jQ98pevhy6dPENoU45Je6qRCSLqONOls8/h
759fROojz+GF1+EZ59VaItI0qnjToapU6FHD1i/Hnr2hD//GY4/Pu6qRCRLqeOuia1b4dpr4bLL4Nvf9puQjzyi0BaRSCm4j0QIfmRYQQE89RT84Q++SdSFF8ZdmYjkAA2VVFd5uQ+HTJ4MrVvDyJHQtGncVYlIDlHHnajKSp8h0rixD4k8+KAfI6bQFpEUU8ediBUrfIrfggU+nj18OOTlxV2ViOQoddyHsns33HUXtGgB69b5EeyvvqrQFpFYqeM+mDlzfOvVNWugWzd44AGoXz/uqkRE1HF/w/bt0KuXzxDZtQumTIEnnlBoi0jaUHD/fy+9BI0a+Rh2nz4+tv2Tn8RdlYjIfhTcAB9+CNdcA7/4hZ9CM2+eD40cc0zclYmIfENuB3cIfhJNQYF32wMGQFmZz88WEUlTuXtzct063yt75kwfzx45En74w7irEhE5rNzruPfuhUGDoEkTKC318ezZsxXaIpIxcqvjXrzYF9IsWeLj2UOHwumnx12ViEi15EbHXVEBt98OrVrBpk0wfrwf2qvQFpEMlP0d98yZPpa9bp132/feCyecEHdVIiJHLHs77k8+gRtugE6dwMwDfORIhbaIZLzsC+4Q4LnnfCHN2LFwxx1+YG/HjnFXJiKSFNk1VPL++9C7t8/JbtnSl6s3axZ3VSIiSZUdHfe+fTBsmHfZ06fDfffB/PkKbRHJSpnfca9e7bv4vfEGdO4MI0bA2WfHXZWISGQyt+P+8ku4+27vqletgjFjYNo0hbaIZL3M7Ljnz/epfStXQteuMHgwnHxy3FWJiKREZnXcO3fCrbdCu3awbRu8/LKfsq7QFpEckjkd9+TJ0KMHvPeezxwZOBCOPTbuqkREUi79O+4tW6CoCK64AurW9SPFHn5YoS0iOSt9gzsEPzKsoMAX1Pzxj745VLt2cVcmIhKr9BwqWb8ebrrJZ4m0bQujRvkcbRERSbOOu7LSjwxr3BjmzoUhQ3xoRKEtIvJ/DhvcZvaomW02sxVRFlJSAvfWvxf69mVG6MiEAav8JmSt9Pq7RUQkbokMlYwBhgCPR1VESYnvvHpURS8WcQ7jd/2SOr8zik/y+5IiIvK1w7azIYTXgE+iLKJfPz/rYDvHM54ugFFR4ddFRGR/SRuHMLPuZlZqZqVbtmyp1u9u2FC96yIiuSxpwR1CKA4hFIYQChs0aFCt323YsHrXRURyWVrc+RswAOrU2f9anTp+XURE9pcWwV1UBMXFkJfnp4zl5flj3ZgUEfmmw84qMbOngIuA+mb2PvBfIYTRyS6kqEhBLSKSiMMGdwjhV6koREREEpMWQyUiIpI4BbeISIZRcIuIZBgFt4hIhlFwi4hkGAshJP9FzbYA5Uf46/WBrUksJxPoM2e/XPu8oM9cXXkhhISWnUcS3DVhZqUhhMK460glfebsl2ufF/SZo6ShEhGRDKPgFhHJMOkY3MVxFxADfebsl2ufF/SZI5N2Y9wiInJo6dhxi4jIIaRNcKfqUOJ0YmZnmtksM1ttZivN7Na4a4qSmX3HzBaa2bKqz9s/7ppSxcxqm9kSM5sUdy2pYGbrzWy5mS01s9K464mamdUzs/Fmtqbq/+e2kb5fugyVmFkHYCfweAihSdz1pIKZnQqcGkJYbGbHAmXAVSGEVTGXFgkzM6BuCGGnmR0NzAFuDSHMj7m0yJlZX6AQOC6EcGXc9UTNzNYDhSGEnJjHbWZjgddDCKPM7FtAnRDCZ1G9X9p03Kk4lDjdhBA2hRAWV/28A1gNnB5vVdEJbmfVw6OrvtKjc4iQmZ0B/BQYFXctknxmdhzQARgNEEL4MsrQhjQK7lxnZvlAc2BBvJVEq2rIYCmwGZgeQsjqz1tlMPCfwL64C0mhAEwzszIz6x53MRE7G9gCPFY1HDbKzOpG+YYK7jRgZscAzwN9Qgjb464nSiGEyhBCM+AMoJWZZfWwmJldCWwOIZTFXUuKtQ8htAAuB3pXDYVmq6OAFsCwEEJz4HPgjijfUMEds6qx3ueBkhDCC3HXkypV/5ScDVwWcylRaw/8vGrM92ngYjMbF29J0QshfFD1fTMwAWgVb0WReh94///963E8HuSRUXDHqOpm3WhgdQjhr3HXEzUza2Bm9ap+/i7QGVgTb1XRCiH8LoRwRgghH+gKzAwhdIu5rEiZWd2qm+1UDRlcCmTtbLEQwofAe2b2w6pLnYBIJxgc9szJVEnVocRppj3w78DyqnFfgN+HEF6NsaYonQqMNbPaeNPwbAghJ6bH5ZiTgQnel3AU8GQIYUq8JUXuFqCkakbJO8D1Ub5Z2kwHFBGRxGioREQkwyi4RUQyjIJbRCTDKLhFRDKMgltEJMMouEVEMoyCW0Qkwyi4RUQyzP8CIO3yfplEP0gAAAAASUVORK5CYII=\n", "text/plain": [ - "" + "
    " ] }, - "metadata": {}, + "metadata": { + "needs_background": "light" + }, "output_type": "display_data" } ], @@ -419,12 +420,14 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAk4AAAGGCAYAAACNCg6xAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAPYQAAD2EBqD+naQAAIABJREFUeJzt3XmYXGWZ9/HvnYQACRIggURElAwSQDaTQQX1VZRF0MEN\nlAADggIKiIZxww0EV+YCBCGKqCCKEUQHRdGwOooE0YSdEIKyiAokAQJkg6Tv94+netJpupPT3dVd\nnarv57rO1V2nzvKc81Sd+p3nbJGZSJIkac2GNLoAkiRJawuDkyRJUkUGJ0mSpIoMTpIkSRUZnCRJ\nkioyOEmSJFVkcJIkSarI4CRJklSRwUmSJKkig5MkSVJFgyY4RcRxEfFARCyJiJsjYtfVDPuuiLg6\nIh6PiIURcVNE7N3FcAdGxOzaNG+PiH37dykkSVIzGxTBKSLeB5wBnAy8CrgdmB4RY7oZ5f8BVwP7\nAhOBG4ArI2LnDtPcHfgxcAGwC/AL4IqI2L6/lkOSJDW3GAwP+Y2Im4E/ZeZHa68D+DtwTmaeXnEa\ndwE/ycwv1V7/BBiRmft3GGYGcGtmHlvvZZAkSc2v4S1OEbEOMAm4rr1fljR3LbBbxWkE8CLgiQ69\nd6tNo6PpVacpSZLUWcODEzAGGAo81qn/Y8C4itP4BDASuKxDv3F9nKYkSdIqhjW6AH0VEQcDnwf2\nz8z5fZzWaGAf4EFgad9LJ0mSBth6wMuB6Zm5oN4THwzBaT6wAhjbqf9Y4NHVjRgRBwHfAQ7IzBs6\nvf1oL6a5D3DJmgosSZIGvUMoF4nVVcODU2Y+HxEzgbcAv4T/O2fpLcA53Y0XEZOB7wLvy8zfdjHI\njC6msVetf3ceBPjRj37EdtttV6n8V10Fn/883HgjrL9+pVFU0ZQpUzjrrLMaXQzVWB+Di/UxuFgf\ng8fs2bM59NBDofabXm8ND041ZwIX1QLULcAUYARwEUBEfBXYPDMPr70+uPbeCcCfI6K9ZWlJZj5d\n+/9s4HcRcSLwa2Ay5ST0o1ZTjqUA2223HRMnTqxU8Llzy9+ddoIXvajSKKpo1KhRletB/c/6GFys\nj8HF+hiU+uWUm8FwcjiZeRnwceBU4FZgJ2CfzJxXG2Qc8NIOoxxFOaH8POCfHbpvdJjmDOBg4Gjg\nNuDdwDsy8556ln3o0PJ3+fJ6TlWSJA1Gg6XFicycCkzt5r0jOr3eo+I0fwb8rO+l696w2hpcsaI/\n5yJJkgaDQdHitDazxUmSpNZhcOojW5z6z+TJkxtdBHVgfQwu1sfgYn20DoNTH9ni1H/cEA0u1sfg\nYn0MLtZH6zA49ZEtTpIktQ6DUx/Z4iRJUuswOPWRLU6SJLUOg1Mf2eIkSVLrMDj1kS1OkiS1DoNT\nH9niJElS6zA49ZEtTpIktQ6DUx/Z4iRJUuswOPWRLU6SJLUOg1Mf2eIkSVLrMDj1kS1OkiS1DoNT\nH9niJElS6zA49ZEtTpIktQ6DUx/Z4iRJUuswOPWRLU6SJLUOg1Mf2eIkSVLrMDj1kS1OkiS1DoNT\nH9niJElS6zA49ZEtTpIktQ6DUx8Nqa1BW5wkSWp+Bqc+iiiH62xxkiSp+Rmc6mDoUFucJElqBQan\nOhg2zBYnSZJagcGpDmxxkiSpNRic6sAWJ0mSWoPBqQ5scZIkqTUYnOrAFidJklqDwakObHGSJKk1\nGJzqwBYnSZJag8GpDmxxkiSpNRic6sAWJ0mSWoPBqQ5scZIkqTUYnOrAFidJklqDwakObHGSJKk1\nGJzqwBYnSZJag8GpDmxxkiSpNRic6sAWJ0mSWoPBqQ5scZIkqTUYnOrAFidJklqDwakObHGSJKk1\nGJzqwBYnSZJag8GpDmxxkiSpNRic6sAWJ0mSWoPBqQ6GDjU4SZLUCgxOdTBsmIfqJElqBQanOrDF\nSZKk1mBwqgNbnCRJag0GpzqwxUmSpNZgcKoDW5wkSWoNBqc6sMVJkqTWYHCqA1ucJElqDQanOrDF\nSZKk1mBwqgNbnCRJag0GpzqwxUmSpNZgcKoDW5wkSWoNBqc6sMVJkqTWYHCqA1ucJElqDQanOrDF\nSZKk1mBwqgNbnCRJag0GpzqwxUmSpNZgcKoDW5wkSWoNBqc6sMVJkqTWYHCqA1ucJElqDQanOrDF\nSZKk1mBwqoNhwyAT2toaXRJJktSfDE51MHRo+WurkyRJzc3gVAfDhpW/nuckSVJzGzTBKSKOi4gH\nImJJRNwcEbuuZthxEXFJRMyJiBURcWYXwxweEW2199tq3eL+KLstTpIktYZBEZwi4n3AGcDJwKuA\n24HpETGmm1HWBR4HTgNuW82kFwLjOnQvq1eZO7LFSZKk1jAoghMwBTg/My/OzHuBDwGLgSO7Gjgz\nH8rMKZn5I+Dp1Uw3M3NeZj5e6+bVv+i2OEmS1CoaHpwiYh1gEnBde7/MTOBaYLc+Tn6DiHgwIh6O\niCsiYvs+Tq9LtjhJktQaGh6cgDHAUOCxTv0foxxe6605lBar/YFDKMt6U0Rs3odpdskWJ0mSWsOw\nRhegv2TmzcDN7a8jYgYwGziGci5Vt6ZMmcKoUaNW6Td58mQmT57c5fC2OEmSNPCmTZvGtGnTVum3\ncOHCfp3nYAhO84EVwNhO/ccCj9ZrJpm5PCJuBbZe07BnnXUWEydOrDxtW5wkSRp4XTVqzJo1i0mT\nJvXbPBt+qC4znwdmAm9p7xcRUXt9U73mExFDgB2Bf9Vrmu1scZIkqTUMhhYngDOBiyJiJnAL5Sq7\nEcBFABHxVWDzzDy8fYSI2BkIYANg09rr5zJzdu39z1MO1d0PbAR8EtgS+G69C2+LkyRJrWFQBKfM\nvKx2z6ZTKYfobgP26XD7gHHASzuNdiuQtf8nAgcDDwHja/02Br5TG/dJSqvWbrXbHdTVOuuUv889\nV+8pS5KkwWRQBCeAzJwKTO3mvSO66Lfaw4yZeSJwYn1Kt3obbVT+PvXUQMxNkiQ1SsPPcWoGo0eX\nv/PnN7YckiSpfxmc6mDjjSECFixodEkkSVJ/MjjVwdChJTwZnCRJam4GpzoZPdrgJElSszM41YnB\nSZKk5mdwqhODkyRJzc/gVCcGJ0mSmp/BqU4MTpIkNT+DU50YnCRJan4GpzoZPRqeeAIy1zysJEla\nOxmc6mT06PKQ34ULG10SSZLUXwxOddL+2BUP10mS1LwMTnVicJIkqfkZnOrE4CRJUvMzONWJwUmS\npOZncKqT9dcv3fz5jS6JJEnqLwanOhozxhYnSZKamcGpjrwJpiRJzc3gVEcGJ0mSmpvBqY4MTpIk\nNTeDUx0ZnCRJam4GpzoyOEm
S1NwMTnVkcJIkqbkZnOpo9GhYvBiWLm10SSRJUn8wONWRdw+XJKm5\nGZzqyOAkSVJzMzjVkcFJkqTmZnCqI4OTJEnNzeBUR6NGwZAhBidJkpqVwamOhgwprU7z5ze6JJIk\nqT8YnOrMezlJktS8DE51ZnCSJKl5GZzqzOAkSVLzMjjVmcFJkqTmZXCqM4OTJEnNy+BUZwYnSZKa\nl8GpzkaPhiefhBUrGl0SSZJUbwanOhs9GjLhqacaXRJJklRvBqc687ErkiQ1L4NTnRmcJElqXgan\nOjM4SZLUvAxOdWZwkiSpeRmc6mz4cNhgA4OTJEnNyODUD8aMgfnzG10KSZJUbwanfuBNMCVJak4G\np35gcJIkqTkZnPqBwUmSpOZkcOoHBidJkpqTwakfGJwkSWpOBqd+0B6cMhtdEkmSVE8Gp34wejQs\nWwaLFze6JJIkqZ4MTv3Au4dLktScDE79wOAkSVJzMjj1A4OTJEnNyeDUDwxOkiQ1J4NTP3jRi2DY\nMIOTJEnNxuDUDyK8l5MkSc3I4NRPxowxOEmS1GwMTv1k9GiYP7/RpZAkSfVkcOonHqqTJKn5GJz6\nicFJkqTmY3DqJwYnSZKaT6+CU0R8ISJGdNF//Yj4Qt+LtfYzOEmS1Hx62+J0MrBBF/1H1N5reaNH\nw8KFsHx5o0siSZLqpbfBKYDsov/OwBO9L07zaL97+BOuDUmSmsawngwcEU9SAlMC90VEx/A0lNIK\n9e36FW/ttemm5e/jj8NmmzW2LJIkqT56FJyAj1Fam75POSS3sMN7zwEPZuaMOpVtrTZ+fPl7//2w\nww6NLYskSaqPHgWnzPwBQEQ8APwxMz2Dpxtjx8KGG8J99zW6JJIkqV56e47TM8B27S8i4h0RcUVE\nfCUihtenaGu3CNhmG5gzp9ElkSRJ9dLb4HQ+sA1ARIwHLgUWAwcCp/dmghFxXEQ8EBFLIuLmiNh1\nNcOOi4hLImJORKyIiDO7Ge7AiJhdm+btEbFvb8rWWxMmGJwkSWomvQ1O2wC31f4/EPjfzDwYeD/w\nnp5OLCLeB5xBOW/qVcDtwPSIGNPNKOsCjwOndShH52nuDvwYuADYBfgFcEVEbN/T8vXWNtt4qE6S\npGbSl9sRtI+7J3BV7f+/A92FndWZApyfmRdn5r3AhygtWEd2NXBmPpSZUzLzR8DT3UzzBOA3mXlm\nZs7JzC8As4Dje1G+XpkwAebNgyefHKg5SpKk/tTb4PQX4HMR8Z/AG4Ff1/pvBTzWkwlFxDrAJOC6\n9n6ZmcC1wG69LB+1ca/t1G96H6fZIxMmlL+2OkmS1Bx6G5w+BkwEzgW+nJn31/ofANzUw2mNodwD\nqnPgegwY18vyURu33tPskVe8ovz1PCdJkppDT+/jBEBm3gHs2MVbnwBW9KlETWTkSNhiC4OTJEnN\nolfBqV1ETGLlbQnuycxZvZjMfErYGtup/1jg0T4U79HeTnPKlCmMGjVqlX6TJ09m8uTJPS7EhAke\nqpMkqT9MmzaNadOmrdJv4cKF3QxdH70KThGxGeUWBG8Enqr13igibgAOysx5VaeVmc9HxEzgLcAv\na9OP2utzelO+mhldTGOvWv/VOuuss5g4cWIfZr3SNtvAjTfWZVKSJKmDrho1Zs2axaRJk/ptnr09\nx+mblOfSvTIzN8nMTYAdgA3pXdg5EzgqIg6LiG0pz7sbAVwEEBFfjYgfdBwhInaOiF1q5di09nq7\nDoOcDbw1Ik6MiAkRcQrlJPRze1G+XpswAebOhba2gZyrJEnqD709VPdWYM/MnN3eIzPviYjjgKt7\nOrHMvKx2z6ZTKYfTbgP26dByNQ54aafRbqU8bBjKieoHAw8B42vTnBERBwNfrnVzgXdk5j09LV9f\nbLMNLF0Kf/87vOxlAzlnSZJUb70NTkOA57vo/zy9bMXKzKnA1G7eO6KLfmucT2b+DPhZb8pTL+23\nJJgzx+AkSdLarreH6q4Hzo6Izdt7RMRLgLPocD8mlbA0fLhX1kmS1Ax6G5yOp5zP9GBE/DUi/go8\nUOv3kXoVrhkMHQpbb+2VdZIkNYPe3sfp7xExkfK4lW1rvWdnZuc7dQsf9itJUrPoUYtTRLw5Iu6J\niA2zuCYzv5mZ3wT+HBF3R8Q+/VTWtZYP+5UkqTn09FDdx4ALMvMFD9bNzIXA+Xio7gUmTICHH4Yl\nSxpdEkmS1Bc9DU47A79dzftXAzv1vjjNacIEyCz3c5IkSWuvngansXR9G4J2y4FNe1+c5rTNNuWv\nh+skSVq79TQ4/YNyh/Du7AT8q/fFaU5jxsAmm3iCuCRJa7ueBqergNMiYr3Ob0TE+sAXgV/Vo2DN\nxivrJEla+/X0dgRfAt4N3BcR5wLtUWBb4DhgKOXxJupkm23g3nsbXQpJktQXPQpOmflYROwOfAv4\nKhDtbwHTgeMy87H6FrE5TJgAv/hFOUk8Ys3DS5KkwafHN8DMzIeA/SJiY2BrSniam5lP1rtwzWSb\nbeCpp2DePNhss0aXprkYRiVJA6W3j1whM5/MzD9n5i2GpjVrf9ivV9bVT1sbfOYz5XmA//hHo0sj\nSWoFvQ5O6pmtty6tIp4gXh+LF8N73wtf+1ppyTvxxEaXSJLUCgxOA2S99UrLSLO2OP3jH3DuufD8\n6u7yVSePPgp77AG/+Q1ccQWcdx5cdhlcfXX/z7snMuFb34J3vxuefbbRpZEGv0w49VT4yldKi/JA\naWuD//kf+PWvB3a+VT37LBx7LFx6aaNLIjA4DajBfkuChQvLF3P58p6Nd8cd8JrXwEc+Akce2b8b\nnrvuKvN65BH4wx9g//3h0EPhTW+C446DpUu7Hu+KK2C//cqjbwbCs8/CIYeUjd2vfgUf/GD5UZDU\ntRUr4Jhj4OST4XOfg/e8p/93ODLht7+FiRPLDs7b3w7bbQff/nZp1a63GTPgwQd7Ns4DD8Duu5ed\nsCOP9AkUg4HBaQD1NjgtWVK+NAceWL54/WHuXHjta+Ggg0rAeLLiWWvXXAOvfz2MHVtanC65pBw2\n6y4kzJsHV14Jzz1XvWzz58PPfw4nnFA2IBtvDH/6U9nYQTkEOnUqPPQQfP3rLxz/pz+FAw6AG26A\n3XaD22+vPu/emD0bXv3qspyXXlrWyaWXwje+0b/zldZWy5fD+98P3/seXHRRuQL52mvhda8r3+v+\ncMst8OY3w777woYbwk03lW6nncpO2EtfWgLc/Pl9n1dbG5xyStl+veEN1c/JvP56+Pd/LyHuT3+C\nF78YDj+85zu3qrPMtKt1wEQgZ86cmf3h3HMz11knc8mSzBUr1jz8ggWZp52WuemmmUOGZI4fnxmR\n+dGPZj77bPfjPftsZltb9XJdd13mxhtnbrNN5ve/v/L/OXNWP96FF2YOG5a5336ZzzxT+k2dmgml\n3J1ddllZFijT/+1vu55uW1vmzTeX5dxppzI8ZP7bv2Uef/zKeXV20kmZ666bOXfuyn4/
+Unm0KGZ\nBx+c+cgjmZMmZb7oRWWZq1iyJPM73ynzXdP6yMz88Y8zR47MfOUrM2fPXtn/4x8v5fjd76rNt5Ge\nfz5z0aJGl0KtYtmyzPe8p2xLLr10Zf8778zcaquyzfjDH+o3v0WLMg8/vGxTdtgh88orX7i9/Nvf\nyvZngw1KGf76197P76mnMt/+9rLtPumkzJe+NHOXXTKffrr7cdraMs8+u2wz9tqr/BZkZv7xj+W3\n4Mtf7n156uHuuzOffLKxZVidmTNnJuU2SROzP7JCf0x0be36Ozhdf/3KENDeDRuWOWJE+XK+7nWZ\nBx5YvrAf+lD5AV5vvcxjj828//7M5cszzzgjc/31y/DXXlum29aWOXNm5uc/n7njjmW6I0dmbr99\n5lvfmnnMMZmnn575+9+XINDR1Knly7n33iu/CPfdlzlhQuZGG2VeffULl2PRosyTTy7zOfro8kPb\n0WmnlfemTi2vH320bBgh893vzrzhhsw3vam8fsc7Vm6Unn4681vfKhsVyNxii8wjjsi8+OLMhx5a\n8/pdtCjz5S8vy9LWVkLMkCGZhx5a1l1mCV377FMC7I9/3P20FizI/NKXMseOLRu8MWNKXR1/fObj\nj6867PPPZ/7855l77lnKfcghLwy2zz+fuccemZttVgJcPT3wQOYll2T+6Efl7yWXlGW7+uryo1TV\n3/9e6vUlLykB9GMfK3XXKP/8Z/khveSSF35uO7rllrLud9wx81e/Wv0029rK+v/lL8uy/sd/lOUd\nPjxz881LUN9jj/I9/MpXug/p3fnHPzJPPDHzne8s419/fc+nsSaLFmWec07mWWdlLl5c32n3p8ce\nK9upuXPL/0uWlO5tbyvr/xe/eOE48+ZlvvGN5ft69tkrv8e99cADZfsyYkTmBReseXoPPZT5ildk\nvvjFJSz01F13lfE32ijzqqtKvzvuyNxww8x9933htjOzLPMhh5RtyX/91wuHOemksi2aNavn5emr\nFSvK53rIkMwtt8z885/7Ps0bbyzfu6OOyly4sO/TyzQ4NVVwWr488/LLMy+6KPN738s8//wSLs46\nK/NTn8o87LDyA7D99pkve1kJQo899sLpzJ1bNiZQWnu23LL8v9FGmf/5n6XV6IwzMk84oQSTXXYp\ne05QNlCve12Z3wc+UPqdcMILv5xPPlkCxtChpRxf+ELmu95VNgIRZbyvfrXrlq22thL+IsqPyOjR\nJXhceunK4dvaSmvQFluUH+l3vauUcciQUubf/KZaq1xnV15ZynbEEWVahx32wo3jc8+t3OP85CfL\nBvQ73ynd+eeX9TFyZCnXMceUlqbFizO/9rWywdtww7Lx+OtfM7/4xfLDC5m7777qMnb22GNleXfb\nrVqgWb68+x/d554rYW2ffVbWR1fdxhtnHnlk5vTpL6zjtrYSVq+6KnP//cv6GjmybMA+97mynCNG\nlM/K/PlrLm932toyn3gi81//ynzwwbI+77ijbHRnzCiB/rrrSgvkd79b6mb8+FWXY8yYzM9+tgST\ndvfdVwIOlBa+PfYo/++/f2kx6Gjhwszzzlu5YwHlc7n33uWH6JxzMk85pQTjgw7KfMtbSv2PHVvC\n/HPPrX4ZH3qo7OCsu275Hu6xR2nZhLJed965fK5uuKHrH8sqnnkm8+tfL+F76NDy4/myl63+M9ed\nefPKej///PJd3XvvzF13zXztazNf//qyfXnzm8tn4Zprui/zihWlLu+8s/sy3HVX+T4OH/7Cz+eQ\nIWVHcPr07su6bFmpF8icODHzppt6tqztbrihfI622irz9turj/foo+VzM3p05l/+Um2cJUvKNn7k\nyDJux1bwzLJOhw0rO8jt623FivL5Hz06c9SosiPUlWXLyudphx1Wv0NRb088UVrOoLSgv/rVpU4v\nuKB301uyJPMTnyjbr1e9qmz/t9xyZYNAXxicmig41dOKFZnf/nbZkBx3XPmwrW7jvnx55q23lh+I\n97637EENH142nN15/vnMKVPKp2Ts2BLqPvax8uW+4441l+/QQ8u4733vC1tp2j37bOZnPlP2OE4+\nubR69NU731nm+/73d79H2dZWwsHQoSs34hErW5e6C63z5mV+5CNlo9fesnf00WXdVnHzzWW9v/vd\npRXwjDNKcD777BLMjjmmNM3/27+tnMdmm5UfsyOOKGH1M58p9QeZr3lN2UA/+WQJd4sWlXX67LPl\nx+Gzny3Tag8fb397CXjjx5dQ1L7su+xSAkLHPb4FC8q8Ro4sIeCoo8qG7nOfK62Kp5+e+c1vlkO2\nl19egs+NN5bP4tlnZ37wg+WHuD1AVOmGDCmf6RNOKId2//nPEpA+8pGyYR02LHPy5LKehg0rQfTC\nC0s9t7WVcV7yktJSe+qpZX0fdVRZhqFDy2fjpz8tQWdNYePBB8uOSEQ5tPzzn5dxnnuuBLhbby3L\nfNRRpUVkk01KK+VTT5Xxly8vgeKCC0p43WKLlYHtiCNKyF+woPsQvXRp+dG+994y3dGjy3yOProE\nwzlzSkiE8vn4y19K+ebMKfM87LDSAjtsWFn2oUPL+u0YtIcMKTtD++9f6uuII8p4hxxSvrftAXbT\nTcuP/A03ZN52W+Y3vlF2djbZZOW0xo4t4114YebDD5fWtv32K++95CXl83LLLaX/FVeUluRzzy3T\nq2LGjPLZaN8xav9+LllSDmH993+X79UBB5Qdm+nTS+BvayvbvaFDSyDuzU7AggXlu7bhhiVwdqWt\nrdTBsceW8AzlFIHuTqv43vfKMKefXranr3tdeX3ooWtu6b399rId+cQnXvje00+X0wSuvTbzBz8o\n6+L440tAPuWU8t384Q9L6+xtt1Vr5Zk1qwTOjTde2aq7dGn5TED5fLe3fra1Zd5zT5nPu95Vvh8/\n/OGqRw1mziw7O8OHl+3e8uWlNbB95+fYY3veUrtiRZnGr3+d+dGP9m9wikwv9WkXEROBmTNnzmRi\n+5nHTSqz3Dpg+PA1D7t0abmdQk8tX15OlN5xx56P2xfz5pXLig87DIas4fKH7OVdx+fOLSeXvv3t\nMGpUz8b9wQ/gU58qJ8ivWFFOHG1rg3XWga22gvHjV/590Yvgr38t85s7t9zOoq2tXEl49NGwyy5r\nnl8mzJpVTlC/885yIv+4cSu7bbeFnXfufj3Mm1dOur/66vJZWLoUli0r3ZIlXZ/ov+66sP32sMMO\npf632qp8htZdd2U3fHhZ5o7dxhvDBht0XY6FC+HCC+Gb3ywXL3zmM+Uk3vXXX3W4Z5+F006DM88s\nn8EttoCjjoIPfABe8pI1r6/Obrut1NfVV5f6eOaZVd/fbDP4+Mfhwx/uvuxQ6uEvfymXvf/856te\nKDJsGIwcCSNGlOGeemrVK0TXXbdcmfmpT5WTlju65hqYMgXuuQfGjCn1NWRI+Wy84Q3lqQURq3Yb\nb1yuHnvFK8q0V1fmmTPL7T4uu2zlidrDh5c
LLd74xtK1tZWTua+9tnzW2n9WdtyxrJuDDqq2rVmT\nFSvgggtK3WeWZZg5s3wGR4woF2W0f97b62ncuHILkylT4PTTy7rujWeegXe8A26+uXz31lln5Xtt\nbaUe7rwTNt+8bHuOOKKs+9X5/OfhS18qZdp663KRyx57VCvP6afDpz9d5vP44+WK4YcfLp+djjbe\nuHzuM+GJJ0q3bNmqw4wZU7Y348fDJpus/K6ut175jp91FrzylXD55eW73NFFF5XP/vbblxPrr7mm\nnPw+fHi54OiJJ8rV0FBuybPLLmX7vMMOcPHFq/4+tLWVdfCpT5V6e+c7S72OHLmyW7q0bAueemrl\n3/vvL7817VdCrrfeLJYunQQwKTNnVVuj1RmcOmil4KS1U/v+/ZoC4UB67jlYtKgElmeeKT8o48fD\n0KH9M7/2212saR3MnVsu/d5jj97/WHZ03XXw5z+XoDR27Mq/L37xqj+iVc2eDXffXTb2ixat/Auw\n0Uardttuu/pHNS1fXn7AHnywXOW6++7lSrF6yizLv2RJCSidA2u7BQvgd78r5X7zm/vncUjz55f7\nPT3+eLnybvfdy492ez20tZUf05kz4dZbS3kPOKDv812ypNwy4U9/WrV/Ztn5OOII2Hvv6p+3TPjs\nZ8vO15QpPQuXK1aUK+zuuquE6S23LH/bu803L4Gpq3pasqQEmkcegb/9bdWuPbR33EF63/vKjkh3\nO9C33VbCYgTsuSfstVcJ7SNHlvcXLIAbb4Tf/74Ezz33LMvd3fLef3+5Onvu3PKdaO+WLSvrdtSo\nVbvx40vMv2pvAAAPT0lEQVRwa+/mzZvFrrsanAaEwUmSpMFpxYqyw7SmMD5r1iwmTeq/4FSH/TBJ\nkqT+1V+t2D01iBr8JUmSBjeDkyRJUkUGJ0mSpIoMTpIkSRUZnCRJkioyOEmSJFVkcJIkSarI4CRJ\nklSRwUmSJKkig5MkSVJFBidJkqSKDE6SJEkVGZwkSZIqMjhJkiRVZHCSJEmqyOAkSZJUkcFJkiSp\nIoOTJElSRQYnSZKkigxOkiRJFRmcJEmSKjI4SZIkVWRwkiRJqsjgJEmSVJHBSZIkqSKDkyRJUkUG\nJ0mSpIoMTpIkSRUZnCRJkioyOEmSJFVkcJIkSarI4CRJklSRwUmSJKkig5MkSVJFBidJkqSKDE6S\nJEkVGZwkSZIqMjhJkiRVZHCSJEmqyOAkSZJUkcFJkiSpIoOTJElSRQYnSZKkigxOkiRJFRmcJEmS\nKho0wSkijouIByJiSUTcHBG7rmH4N0XEzIhYGhH3RcThnd4/PCLaImJF7W9bRCzu36WQJEnNbFAE\np4h4H3AGcDLwKuB2YHpEjOlm+JcDvwKuA3YGzga+GxF7dRp0ITCuQ/eyfii+JElqEYMiOAFTgPMz\n8+LMvBf4ELAYOLKb4T8M/C0zP5mZczLzPODy2nQ6ysycl5mP17p5/bYEkiSp6TU8OEXEOsAkSusR\nUNIOcC2wWzejvbb2fkfTuxh+g4h4MCIejogrImL7OhVbkiS1oIYHJ2AMMBR4rFP/xyiH17oyrpvh\nN4yIdWuv51BarPYHDqEs600RsXk9Ci1JklrPsEYXoL9k5s3Aze2vI2IGMBs4hnIulSRJUo8MhuA0\nH1gBjO3UfyzwaDfjPNrN8E9n5rKuRsjM5RFxK7D1mgo0ZcoURo0atUq/yZMnM3ny5DWNKkmSBsi0\nadOYNm3aKv0WLlzYr/OMcjpRY0XEzcCfMvOjtdcBPAyck5n/3cXwXwP2zcydO/T7MbBRZu7XzTyG\nAHcDv87Mj3czzERg5syZM5k4cWJfF0uSJA2wWbNmMWnSJIBJmTmr3tMfDOc4AZwJHBURh0XEtsC3\ngRHARQAR8dWI+EGH4b8NjI+Ir0fEhIg4FjigNh1q43w+IvaKiK0i4lXAJcCWwHcHZpEkSVKzGQyH\n6sjMy2r3bDqVcsjtNmCfDrcPGAe8tMPwD0bE24CzgBOAR4APZGbHK+02Br5TG/dJYCawW+12B5Ik\nST02KIITQGZOBaZ2894RXfT7PeU2Bt1N70TgxLoVUJIktbzBcqhOkiRp0DM4SZIkVWRwkiRJqsjg\nJEmSVJHBSZIkqSKDkyRJUkUGJ0mSpIoMTpIkSRUZnCRJkioyOEmSJFVkcJIkSarI4CRJklSRwUmS\nJKkig5MkSVJFBidJkqSKDE6SJEkVGZwkSZIqMjhJkiRVZHCSJEmqyOAkSZJUkcFJkiSpIoOTJElS\nRQYnSZKkigxOkiRJFRmcJEmSKjI4SZIkVWRwkiRJqsjgJEmSVJHBSZIkqSKDkyRJUkUGJ0mSpIoM\nTpIkSRUZnCRJkioyOEmSJFVkcJIkSarI4CRJklSRwUmSJKkig5MkSVJFBidJkqSKDE6SJEkVGZwk\nSZIqMjhJkiRVZHCSJEmqyOAkSZJUkcFJkiSpIoOTJElSRQYnSZKkigxOkiRJFRmcJEmSKjI4SZIk\nVWRwkiRJqsjgJEmSVJHBSZIkqSKDkyRJUkUGJ0mSpIoMTpIkSRUZnCRJkioyOEmSJFVkcJIkSarI\n4CRJklSRwUmSJKkig5MkSVJFBidJkqSKDE6SJEkVGZwkSZIqMjhJkiRVZHCSJEmqyOAkSZJUkcFJ\nkiSpIoOTJElSRYMmOEXEcRHxQEQsiYibI2LXNQz/poiYGRFLI+K+iDi8i2EOjIjZtWneHhH79t8S\nqN6mTZvW6CKoA+tjcLE+Bhfro3UMiuAUEe8DzgBOBl4F3A5Mj4gx3Qz/cuBXwHXAzsDZwHcjYq8O\nw+wO/Bi4ANgF+AVwRURs328LorpyQzS4WB+Di/UxuFgfrWNQBCdgCnB+Zl6cmfcCHwIWA0d2M/yH\ngb9l5iczc05mngdcXptOuxOA32TmmbVhvgDMAo7vv8WQJEnNrOHBKSLWASZRWo8AyMwErgV262a0\n19be72h6p+F3qzCMJElSZQ0PTsAYYCjwWKf+jwHjuhlnXDfDbxgR665hmO6mKUmStFrDGl2AQWY9\ngNmzZze6HAIWLlzIrFmzGl0M1Vgfg4v1MbhYH4NHh9/w9fpj+oMhOM0HVgBjO/UfCzzazTiPdjP8\n05m5bA3DdDdNgJcDHHrooasvsQbMpEmTGl0EdWB9DC7Wx+BifQw6LwduqvdEGx6cMvP5iJgJvAX4\nJUBERO31Od2MNgPofGuBvWv9Ow7TeRp7dRqms+nAIcCDwNJqSyBJkgaR9SihaXp/TDzKediNFRHv\nBS6iXE13C+XquAOAbTNzXkR8Fdg8Mw+vDf9y4E5gKvB9SkD6BrBfZl5bG2Y34HfAScCvgcnAp4GJ\nmXnPAC2aJElqIg1vcQLIzMtq92w6lXI47TZgn8ycVxtkHPDSDsM/GBFvA86i3HbgEeAD7aGpNsyM\niDgY+HKtmwu8w9AkSZJ6a1C0OEmSJK0NBsPtCCRJktYKBqeanj4rT30XESdFxC0R8XREPBYR/xMR\n23
Qx3KkR8c+IWBwR10TE1o0ob6uJiE9HRFtEnNmpv/UxQCJi84j4YUTMr63v2yNiYqdhrI8BEBFD\nIuK0iPhbbV3fHxGf62I466MfRMQbIuKXEfGP2nZp/y6GWe26j4h1I+K82vfpmYi4PCI262lZDE70\n/Fl5qps3AN8EXgPsCawDXB0R67cPEBGfojwm52jg1cAiSt0MH/jito7ajsPRlO9Cx/7WxwCJiI2A\nPwLLgH2A7YD/Ap7sMIz1MXA+DRwDHAtsC3wS+GRE/N9jvKyPfjWScv7zscALzjGquO6/AbwNeA/w\n/4DNgZ/1uCSZ2fIdcDNwdofXQTnh/JONLlsrdZS7yLcBr+/Q75/AlA6vNwSWAO9tdHmbtQM2AOYA\nbwZuAM60PhpSD18D/ncNw1gfA1cfVwIXdOp3OXCx9THgddEG7N+p32rXfe31MuBdHYaZUJvWq3sy\n/5Zvcerls/LUPzai7Ek8ARARW1GuqOxYN08Df8K66U/nAVdm5vUde1ofA+4/gL9ExGW1Q9mzIuKD\n7W9aHwPuJuAtEfEKgIjYGXgdcFXttfXRIBXX/b9T7iTQcZg5wMP0sH4Gxe0IGmx1z8qbMPDFaU21\nm55+A7gxV94yYhwlSPnMwQESEQcBu1A2Mp1ZHwNrPPBhymkEX6YcfjgnIpZl5g+xPgba1yitFvdG\nxArKqS6fzcyf1N63PhqnyrofCzxXC1TdDVOJwUmDxVRge8oenBogIraghNc9M/P5RpdHDAFuyczP\n117fHhE7UG4U/MPGFatlvQ84GDgIuIeyg3F2RPyzFmTVIlr+UB29e1ae6igizgX2A96Umf/q8Naj\nlPPNrJuBMQnYFJgVEc9HxPPAG4GPRsRzlD0z62Pg/Avo/MTx2cCWtf/9fgys04GvZeZPM/PuzLyE\nchPmk2rvWx+NU2XdPwoMj4gNVzNMJS0fnGp71u3PygNWeVZe3R8OqFXVQtM7gD0y8+GO72XmA5QP\ndMe62ZByFZ51U3/XAjtS9qR3rnV/AX4E7JyZf8P6GEh/5IWnC0wAHgK/Hw0wgrKT3VEbtd9R66Nx\nKq77mcDyTsNMoOyIrO4Zti/gobriTOCi2sOG25+VN4Ly/Dz1k4iYSnmG4P7Aooho31tYmJntD1n+\nBvC5iLif8vDl0yhXPP5igIvb9DJzEeUQxP+JiEXAgsxsb/mwPgbOWcAfI+Ik4DLKj8AHgaM6DGN9\nDJwrKev6EeBuYCLlt+K7HYaxPvpJRIwEtqa0LAGMr52g/0Rm/p01rPvMfDoivgecGRFPAs8A5wB/\nzMxbelSYRl9WOFg6yr0hHqRcvjgD+PdGl6nZO8re2oouusM6DXcK5VLTxZSnXW/d6LK3SgdcT4fb\nEVgfA77+9wPuqK3ru4EjuxjG+hiYuhhJ2cl+gHKPoLnAF4Fh1seArP83dvOb8f2q6x5Yl3LvwPm1\n4PRTYLOelsVn1UmSJFXU8uc4SZIkVWVwkiRJqsjgJEmSVJHBSZIkqSKDkyRJUkUGJ0mSpIoMTpIk\nSRUZnCRJkioyOEmSJFVkcJKkLkREW0Ts3+hySBpcDE6SBp2IuLAWXFbU/rb/f1WjyyaptQ1rdAEk\nqRu/Ad7PyqehAyxrTFEkqbDFSdJgtSwz52Xm4x26hfB/h9E+FBFXRcTiiPhrRLyn48gRsUNEXFd7\nf35EnB8RIzsNc2RE3BURSyPiHxFxTqcybBoRP4+IRRFxX0T8R4dxN4qISyLi8do85kTE4f22NiQN\nCgYnSWurU4GfAjsBlwA/iYgJABExApgOLAAmAQcAewLfbB85Ij4MnAt8G3gl8Dbgvk7z+ALwE2BH\n4CrgkojYqPbel4BtgX1qfz8MzK/3QkoaXCIzG10GSVpFRFwIHAos7dA7ga9k5tciog2YmpnHdxhn\nBjAzM4+PiKOArwJbZObS2vv7AlcCL87MeRHxCPC9zDy5mzK0Aadm5im11yOAZ4G3ZubVEfELYF5m\nfrC+Sy9pMPMcJ0mD1fXAh1j1HKcnOvx/c6fhZwA71/7fFri9PTTV/JHSyj4hIgA2r81jde5s/ycz\nF0fE08BmtV7fAn4WEZOAq4ErMnPGmhZK0trN4CRpsFqUmQ/007SXVBzu+U6vk9opDpn524jYEtgP\n2Au4NiLOy8xP1q+YkgYbz3GStLZ6bRevZ9f+nw3sHBHrd3j/9cAK4N7MfBZ4EHhLXwqQmQsy84eZ\neRgwBTi6L9OTNPjZ4iRpsFo3IsZ26rc8MxfU/j8wImYCN1LOh9oVOLL23iXAKcAPIuKLlMNr5wAX\nZ2b7CdynAN+KiHmUWx9sCOyemedWKVxtujOBu4H1gLcD9/R0ISWtXQxOkgartwL/7NRvDrB97f+T\ngYOA84B/AQdl5r0AmbkkIvYBzgZuARYDlwP/1T6hzLw4ItaltBT9N+WKuMs7zKurK2eyQ//ngK8A\nL6cc+vsDMLkXyylpLeJVdZLWOrUr3t6Zmb9sdFkktRbPcZIkSarI4CRpbWRTuaSG8FCdJElSRbY4\nSZIkVWRwkiRJqsjgJEmSVJHBSZIkqSKDkyRJUkUGJ0mSpIoMTpIkSRUZnCRJkioyOEmSJFX0/wHZ\n6qOUbgP8qgAAAABJRU5ErkJggg==\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAagAAAEYCAYAAAAJeGK1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4xLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvDW2N/gAAIABJREFUeJzt3Xl4HNWd7vHvT63FkmxJtiS8r9jEmM1gYXaSQEjMQDCZQDAh7DfOxk1mmEkGboZkhiRzJzczIcOEBEzYQlhCCAlOIHgI++JNNgYvYBDGi2zJli1rsfaWfvePLom2LFlbl9XI7+d5+lH3qVPVp1otvX2qTp8yd0dERCTZpAx2A0RERLqigBIRkaSkgBIRkaSkgBIRkaSkgBIRkaSkgBIRkaQUakCZ2Twz22hmJWZ2UxfLbzSzDWb2lpk9Z2aT45ZdbWbvBber48rnmNnaYJu3m5mFuQ8iIjI4LKzvQZlZBHgXOA8oBVYCl7v7hrg6nwSWu3u9mX0N+IS7X2Zmo4BioAhwYBUwx933mtkK4FvAMuBp4HZ3/0soOyEiIoMmzB7UXKDE3Te5ezPwKDA/voK7v+Du9cHDZcCE4P5ngGfdvdLd9wLPAvPMbCyQ4+5LPZasvwYuDnEfRERkkKSGuO3xwLa4x6XAKQepfz3Q3hPqat3xwa20i/IDmNlCYCFAdnb2nJkzZ/al7T16p7yW4RmpTBiZmdDtiogMdatWrdrt7oU91QszoLo6N9Tl8UQz+xKxw3kf72HdXm/T3RcBiwCKioq8uLi4p/b2yRn//jynTBvFT78wO6HbFREZ6sxsS2/qhXmIrxSYGPd4ArCjcyUz+xTwXeAid2/qYd1SPjwM2O02D4VIitHWpnkMRUTCEmZArQRmmNlUM0sHFgCL4yuY2YnAXcTCaVfcoiXAp81spJmNBD4NLHH3MqDWzE4NRu9dBTwZ4j50K5JitCqfRERCE9ohPnePmtkNxMImAtzr7uvN7Fag2N0XAz8BhgO/C0aLb3X3i9y90sx+QCzkAG5198rg/teA+4FMYuesBmUEX4qhHpSISIjCPAeFuz9NbCh4fNn34u5/6iDr3gvc20V5MXBsApvZL6kpKUTb2ga7GSIiQ5ZmkuinlBSjVfkkIhIaBVQ/RVKgTRd7FBEJjQKqnyJmtOoclIhIaBRQ/ZSSYupBiYiESAHVT6kpRlTjzEVEQqOA6qcUM1rVgxIRCY0Cqp80k4SISLgUUP0Um0lCASUiEhYFVD+lmHpQIiJhUkD1U2qKEVVAiYiERgHVT7GZJBRQIiJhUUD1U8T0PSgRkTApoPopoh6UiEioFFD9FJtJYrBbISIydCmg+ik2SELTmYuIhEUB1U+xYeaD3QoRkaFLAdVPkRR0DkpEJEQKqH7STBIiIuFSQPWTZpIQEQmXAqqfNJOEiEi4Qg0oM5tnZhvNrMTMbupi+dlmttrMomZ2SVz5J81sTdyt0cwuDpbdb2YfxC2bHeY+dCdFs5mLiIQqNawNm1kEuAM4DygFVprZYnffEFdtK3AN8I/x67r7C8DsYDujgBLgf+KqfNvdHw+r7b0R0fWgRERCFVpAAXOBEnffBGBmjwLzgY6AcvfNwbKDDdi+BPiLu9eH19S+00wSIiLhCvMQ33hgW9zj0qCsrxYAj3Qq+5GZvWVmt5lZRlcrmdlCMys2s+KKiop+PO3BxWaSUECJiIQlzICyLsr69B/dzMYCxwFL4opvBmYCJwOjgH/qal13X+TuRe5eVFhY2Jen7RUNkhARCVeYAVUKTIx7PAHY0cdtfAH4g7u3tBe4e5nHNAH3ETuUeMilmOEOrl6UiEgowgyolcAMM5tqZunEDtUt7uM2LqfT4b2gV4WZGXAxsC4Bbe2zSEqsg6jzUCIi4QgtoNw9CtxA7PDc28Bj7r7ezG41s4sAzOxkMysFLgXuMrP17eub2RRiPbCXOm36ITNbC6wFCoAfhrUPB9MRUOpBiYiEIsxRfLj708DTncq+F3d/JbFDf12tu5kuBlW4+zmJbWX/pFgsoDRhrIhIODSTRD+lBj0oXXJDRCQcCqh+SklRD0pEJEwKqH6KBIPodQ5KRCQcCqh+0ig+EZFwKaD6qeMQn3pQIiKhUED104eDJBRQIiJhUED104fDzBVQIiJhUED1k85BiYiESwHVT5pJQkQkXAqoftIhPhGRcCmg+kmDJEREwqWA6qcUnYMSEQmVAqqfIqbvQYmIhEkB1U8axSciEi4FVD9pJgkRkXApoPqpY5BEqwJKRCQMCqh+ah9mru9BiYiEQwHVTxFdD0pEJFQKqH6KBK+celAiIuFQQPWTZpIQEQlXqAFlZvPMbKOZlZjZTV0sP9vMVptZ1Mwu6bSs1czWBLfFceVTzWy5mb1nZr81s/Qw96E7qSmxl04zSYiIhCO0gDKzCHAHcD4wC7jczGZ1qrYVuAZ4uItNNLj77OB2UVz5j4Hb3H0GsBe4PuGN74WU9kN8CigRkVCE2YOaC5S4+yZ3bwYeBebHV3D3ze7+FtCroQZmZsA5wONB0QPAxYlrcu9F9D0oEZFQhRlQ44FtcY9Lg7LeGmZmxWa2zMzaQygfqHL3aE/bNLOFwfrFFRUVfW17j9qnOlIPSkQkHKkhbtu6KOvLf/NJ7r7DzKYBz5vZWqCmt9t090XAIoCioqKEp4hmkhARCVeYPahSYGLc4wnAjt6u7O47gp+bgBeBE4HdQJ6ZtQdrn7aZSJpJQkQkXGEG1EpgRjDqLh1YACzuYR0AzGykmWUE9wuAM4AN7u7AC0D7iL+rgScT3vJeSE+NvXRNUX1TV0QkDKEFVHCe6AZgCfA28Ji7rzezW83sIgAzO9nMSoFLgbvMbH2w+tFAsZm9SSyQ/t3dNwTL/gm40cxKiJ2TuiesfTiY3Mw0AKobWgbj6UVEhrwwz0Hh7k8DT3cq+17c/ZXEDtN1Xu914LhutrmJ2AjBQZWZFiE9kqKAEhEJiWaS6CczIyczjeqG5sFuiojIkKSAGoC8rDT1oEREQqKAGoDczDSq6hVQIiJhUEANQG6melAiImFRQA1AnnpQIiKhUUANQE5mGjXqQYmIhEIBNQB5WWnUNkWJturLuiIiiaaAGoD2L+vWNEZ7qCkiIn2lgBqAvCzNJiEiEhYF1AC096Cq6vVlXRGRRFNADUBuZuxq8+pBiYgkngJqADRhrIhIeBRQA6BzUCIi4VFADcCH56AUUCIiiaaAGoC0SArZ6RH1oEREQqCAGiBNGCsiEg4F1ADlZqWrByUiEgIF1ADlZqbqooUiIiFQQA1QXqZ6UCIiYVBADZDOQYmIhCPUgDKzeWa20cxKzOymLpafbWarzSxqZpfElc82s6Vmtt7M3jKzy+KW3W9mH5jZmuA2O8x96Iku+y4iEo7UsDZsZhHgDuA8oBRYaWaL3X1DXLWtwDXAP3ZavR64yt3fM7NxwCozW+LuVcHyb7v742G1vS9yMtNoirbR2NLKsLTIYDdHRGTICC2ggLlAibtvAjCzR4H5QEdAufvmYNl+F1Ry93fj7u
8ws11AIVBFkomfTUIBJSKSOGEe4hsPbIt7XBqU9YmZzQXSgffjin8UHPq7zcwyullvoZkVm1lxRUVFX5+21zSbhIhIOMIMKOuizPu0AbOxwIPAte7e3su6GZgJnAyMAv6pq3XdfZG7F7l7UWFhYV+etk/yNKO5iEgowgyoUmBi3OMJwI7ermxmOcBTwD+7+7L2cncv85gm4D5ihxIHja4JJSISjjADaiUww8ymmlk6sABY3JsVg/p/AH7t7r/rtGxs8NOAi4F1CW11H2lGcxGRcIQWUO4eBW4AlgBvA4+5+3ozu9XMLgIws5PNrBS4FLjLzNYHq38BOBu4povh5A+Z2VpgLVAA/DCsfeiNHF0TSkQkFGGO4sPdnwae7lT2vbj7K4kd+uu83m+A33SzzXMS3MwBGZGRSoopoEREEk0zSQxQSoqRo9kkREQSTgGVAHmZmk1CRCTRFFAJkJuZRpUCSkQkoRRQCaBrQomIJJ4CKgFyM9Oo1vegREQSSgGVADoHJSKSeAqoBMgNAqqtrU8zOYmIyEEooBIgLyuNNod9zdHBboqIyJChgEqAjtkk9F0oEZGEUUAlQJ6mOxIRSTgFVALkKqBERBJOAZUAeVmxa0JpuiMRkcRRQCWAelAiIomngEqA9mtCVTXoy7oiIomigEqAYWkR0lNT1IMSEUmgXgWUmT3Ym7LDWV5mmoaZi4gkUG97UMfEPzCzCDAn8c356BqVnU5FbdNgN0NEZMg4aECZ2c1mVgscb2Y1wa0W2AU8eUha+BExaVQWWyvrB7sZIiJDxkEDyt3/r7uPAH7i7jnBbYS757v7zYeojR8JUwqy2VJZr/n4REQSpLeH+P5sZtkAZvYlM/upmU0OsV0fOZNGZdEcbWNnbeNgN0VEZEjobUD9Eqg3sxOA7wBbgF/3tJKZzTOzjWZWYmY3dbH8bDNbbWZRM7uk07Krzey94HZ1XPkcM1sbbPN2M7Ne7kOopuRnA7B5tw7ziYgkQm8DKuruDswH/svd/wsYcbAVgoEUdwDnA7OAy81sVqdqW4FrgIc7rTsK+D5wCjAX+L6ZjQwW/xJYCMwIbvN6uQ+hmpyfBcDWyrpBbomIyNDQ24CqNbObgSuBp4LwSethnblAibtvcvdm4FFiAdfB3Te7+1tAW6d1PwM86+6V7r4XeBaYZ2ZjgRx3XxoE5q+Bi3u5D6EamzuM1BRj8x71oEREEqG3AXUZ0ARc5+7lwHjgJz2sMx7YFve4NCjrje7WHR/c73GbZrbQzIrNrLiioqKXT9t/qZEUJo7KYqsCSkQkIXoVUEEoPQTkmtmFQKO793QOqqtzQ70d4tbdur3eprsvcvcidy8qLCzs5dMOzKRRWWzRIT4RkYTo7UwSXwBWAJcCXwCWdx7U0IVSYGLc4wnAjl62q7t1S4P7/dlm6KbkZ7Fldz2xo48iIjIQvT3E913gZHe/2t2vInZ+6ZYe1lkJzDCzqWaWDiwAFvfy+ZYAnzazkcHgiE8DS9y9jNj5sFOD0XtXkURfGJ6Un01tU5S9mvJIRGTAehtQKe6+K+7xnp7WdfcocAOxsHkbeMzd15vZrWZ2EYCZnWxmpcR6ZneZ2fpg3UrgB8RCbiVwa1AG8DXgV0AJ8D7wl17uQ+gmj4qN5Nu8R4f5REQGKrWX9Z4xsyXAI8Hjy4Cne1rJ3Z/uXM/dvxd3fyX7H7KLr3cvcG8X5cXAsb1s9yE1pSAYar6nnpMmjeyhtoiIHMxBA8rMpgOj3f3bZva3wJnEBiosJTZoQuJMGJmFmXpQIiKJ0NMhvp8BtQDu/oS73+juf0+sV/SzsBv3UTMsLcLYnGEaai4ikgA9BdSU4Iu0+wkOs00JpUUfcZPys9iiWc1FRAasp4AadpBlmYlsyFAxeVQ2W3SIT0RkwHoKqJVm9uXOhWZ2PbAqnCZ9tE0uyGL3vmb2NUUHuykiIh9pPY3i+zvgD2Z2BR8GUhGQDnwuzIZ9VE0eFZvVfMueOo4ZlzvIrQmHu+MOKSlJMZG8iAxRBw0od98JnG5mn+TDod1PufvzobfsI6pjVvM99UMyoMqqG7ju/mKmFmTxiyvmDHZzRGQI6+1cfC+4+38HN4XTQbQHVDLNat7Y0sqmin0D3s7m3XVc8sulvFNew9Nry3np3XAn4S3ZtY8l68tDfQ756HF3nnt7J7sSdHHQaGsbm3fXhT5FWWub8/TaMmoaNdNMb/V2JgnppRHD0hiVnd7r60IN5I9ib10zj6zYSmNLa7d1qutbWLBoGZ/66Us8/87Ofj/Xhh01XHLnUhpaWnn8q6czOT+LHz21gWjrh1dK2VSxj68/tIr1O6r7/Tzt/rphJ/N//ipfeXAVz27of7tl6Ln9uRKuf6CYz93xOiW7BvbB6/X3d3PB7a/yif94kfl3vMb/rC+nra1vf5PuzpL15Ww7yOjdxpZWbnh4NV9/aDU3P7F2QG0+nCigQjA5P4stB+lBtbU5f35rB5+57WUuuP1VNpbX9vk5tlc1cMmdr3PzE2u56p4VVDcc+Kls974mFty9jA07apiSn83/fvgN3i6r6VheWdfMt3/3Jr94sYSW1s6X5IKSXbXc8+oHXHPfCj73i9dIixiPfeU05kweyU3zZvLuzn08Vhy7+klZdQNX3rOCp9eW88W7l/Pmtqpu294cbaN0b9evj7uz6OX3+fKDxUwrHM6ssTl85/E32VmTmE/L8tF298ubuO2v7/LpWaNpirZxyZ2vs3rr3j5vp7y6kW88tJov3r2cuuYoN553FFX1LSx8cBV/c/srrNveuw9Z0dY2bnlyHV95cBVf/NUyquqbD6hTVd/Ml361nGfWl3PqtFE89VYZL7yzq4utSWd2OMy8XVRU5MXFxYfs+f7u0Td4Zn05J0zIIzVipEVSOGJEBmNyM8kZlsrvikvZuLOWIwuzqW5ooaYxynf/5miuOm0ypXsb+NNbO3hpYwU5mWlMGpXFpFFZHDs+h+Mn5JEWSWFjeS1X37uCuuYo150xlV+8WMKRhcN54Lq5jM6JfTOgdG89V9+7gu1VDdx1ZREzx4xg/s9fI5Ji/OEbp/Nu+T5ufGwNe+qaaW1zZo4Zwf+75Hhmjc3hmfXl3PPqB7yxNRYy0wqyOfuoQhaePY1xebFvF7g7X7hrKR/sruMPXz+Da+9fSXl1I//5hRP44VMbqKpr4f7rTmbO5FEdr0tztI3fry7l58+XsL2qgXnHjOGWz85ifLDNd8pr+O/nSnhqbRkXHDeW/7j0BHZUN3Dh7a9y0uQ8HrzulD4NzGhtc1Zv3UtdMKLSzDhq9HDG5nb9DYmmaCt/frOMP67ZzvETcvnqx49kxLCersvZe7v3NfFWaRXHjsvliJwPv8HR2uY8vGIrb2zdy43nHcWEkVn7rdccbeOd8hrWbKtizdYqKuubGZWdTsHwDCaOzOTzcyaQld716WR358V3K3hjaxWzJ+YyZ/IocjP7tk+1jS08u2Enp0zL7/hdHQqNLa1sq6xn+LBURgxL449vbOef/7iOC44by38tmM2Oqkauunc55TWN/OyyE5l37JhebXfd9
mquvX8ltY0tfP0T01l49jSGpUWItrax+M0d/GTJRuqbW3nky6cya1xOt9upa4pyw8OreWFjBX974nj+/FYZp0wbxf3XziUSvE83Vezjy78uZtveBm77wmzOmzWav7n9FRqaW3n2xrO7/b311SvvVfDPf1zHladO5vozpxKbS/vg9uxr4ra/vsuZ0wuYd+zYhLSjt8xslbsX9VhPAZV4r7xXwZ0vvU9Lq9PW5jRGW9lV00TFvibcYVphNt86dwYXHj+OyrpmvvP4m7ywsYLxeZlsr2oA4JhxObS0trGtsoGG4BBeVnqEoimjWLN1L8PSIvz6+rnMHJPDq+/t5isPFpOXlc6scTls2FHD9qoGhmekcs/VRZwyLR+I/WFeeudSRmalUVbTyLSCbG6//ES2723glifXUVHbRMHwDHbVNjElP4urTpvCebNGM3FUVpf7+ea2Kubf8RpZ6RFa25wHrpvLqdPyKatu4It3L2dnTSMXnxi7nqQ7vPxuBdurGjhhYh6nThvFA69vBuC6M6aydns1r7y3m8y0CF/7xJHc8MnpHWH06Iqt3PTEWm4+fyZf+fiRHc/fHG1ja2U9W/bU0eZwZGE2k0Zl0Rht43fF27j/9c0H9GTN4MzpBXz+pAmcMm0Ue/Y1U17dyLod1Ty0fCsVtU0dv4eC4en8/XlHcVnRRFIjXR9scHfKaxp5u6yGyroWmqNtNEVbaY62EW1zmqNt7K1vZsUHlbwT9JTTU1O4dM4EvnL2kZRVN/Avf9rA22U1RFKMYakp3HT+TK44ZTK7apt4YOlmHl6+taOHXDA8gzG5Geyta2H3viaaom2Myx3GP184i/OPHbPfP6ZVWyr58V82smJzZUeZGRw9JofPHDOGi2aPY2pBdrfv48aWVn6zbAt3vFDC3voWstIj3HjeUVxz+hRSIym4O1sr63m/Yh9tQQe8MdpKya59bCyv5d2dtTRF20hNMVJSjCn52XzuxPGcN2s0w9IiNEfbeGPrXtZsq+JjY0Zw6rR8hqVFqG+O8ptlW1j08iZ279u/R3LOzCO480tzSE+N/T5272vi+vtX8mZpNRfPHsf3P3sMI7PTO343FbVN5A/P6AiMl96t4Ou/WUVeVjr3XXsyR40eccB+b6us57K7ltIYbeO3C09lRqc67s5rJXv4t6ff5p3yGm6dfyxfOnVyx/v06584kn/49Me477UP+MmSjWSmR1h0ZRFzp8Y+rK3cXMmldy7ly2dN5bsXzOrytW9t84429+TBpZv5lz9tICstQm1TlCtOmcS/XHQMacF7tjnaRkNL634fTJZv2sM3H32DnTVNAFxxyiRuuXAWw9IivXrOvrSvKwqoOIc6oLrT0tpGZV0zBXF/MBB7wz+4bAv/s34nZ84o4ILjxnaEQvsf2eqtVbz+/m5eK9nN8GFp/PzyE/cLjnXbq/nmI29gBrPG5XLMuBzOmzWaIwuH79eGJevLueHh1VwyZwK3XDir4xNcTWML/7lkI1sr67l87iTOPXp0r96AN/52DU++uYM7vzSH82aN7ijfVdPI1x9azQe764j9zzSmFmTxjU9O5+NHFWJmbK9q4Ad/2sAz68spHJHBNadP4YpTJpGXlb7fc7g7X39oNUvWl5OXlU5KsL3KuiY6ny5Ij6QQSTEaWlo5aVIeV58+hYmjsnCP/VG9WrKbJ1aXUrq34YB9+fhRhVx/5lTOmlHAW6XV/PCpDazcvJcUi4VKRmqEYWkpZGekMjwjlfRICpt211FZd+BhnXiZaRHmTB7JaUfmc/yEXJ5eW87vV5USbWujzWF8XibfveBojp+Qy81PrOWV93ZzZGE2W/bU0+bOp2eN4cITxjJ7Yh7j8zI7QsjdKd6yl+89uZ63y2o4bVo+Uwqyqahtoqy6gfU7aigckcE3z53B/NnjWLe9mpUf7OW1kt0doXXc+Fw+NmYE2ekRsjJSaXOnpqGFqvoW3txWxY7qRs6aUcB1Z0zlwWVbeP6dXcwam8Ox43N4rWRPxweqeGYwJT+bo0YPJzsjldY2J9oa682WVTcyYlgqx43PZc22KuqbPzx/OiwthVOm5rNuezV76po5c3oBnztxPE3RNmobW0iLpPDFUyYd8E+0KdrKHS+8zy9eKCEvK42rT5vCu7v2sWzTHipqm8hOj3DCxDwm52fzWPE2PjZ6BPdde3LH0YaufLC7jsvuWooD//a548hKjz3nxvJafrNsC5t215Gfnc5/XHoCn5x5RMd6Nz+xlkdWbGXmmBG8U17Lp44ezb997tj9eszt9X67cis/uPhYGppbKd3bQOneBsqqGyirbqSyrpnMtAgjs9IYmZ3OuLxMphZkMyU/m9E5GQxLi5CRmsKf3tzBA0u3cO7MI7htwWx++eL7/PLF9zlrRgGfPX4cL2zcxSvv7WZfU5RZY3M4Y3o+kZQUFr38PpPzs7ntstn8ZW0Zd728iZljRvDtz3yMvKx0sjMipKakUNPYQk1DC5V1zby3ax/vlNXwTnkt844dw/c/e8xB3/cHo4CKkywBlSwaW1p7/UmpJ83RNsqrG5mU33Uvqze2VdZzRE4GGandt6m6oYW7X95EdUMLre64O4XDM5hSkM2UgmyM2Ki/kop91DVF+fxJEzixmxnl29qcFZsreW/XPo4YkcHonGGMz8ukcETGfvXcnb++vYu3SqtoirbFPok2t7KvOUpdU5TGllam5Gcza1wOR4/NYUzOMDJSU0gPbqkpKaRFrMvDLTtrGnlw6RYy0yNcd8ZUMoN/gO7O71aVct9rmzltWj7XnjGl2x5su2hrGw+v2Mrtz5UATsHwDI7IGcap00ZxzelTujyMVFbdwFNvlfH02jLKqxupa26lvjmKYeRkppGXlca4vEy+cvY0zphe0NG2Z9aV869/2kBDSyunTcvnjOn5HDM+l9QUwzAiKcbUguyO/YnX2uYs27SH368uZcOOGoqmjOTM6YWcNDmPDTtqeOGdXbxSspuJI7P45rnT9zs83BsbdtTwnd+/ybrtNYzOyeDUafkcNz6XrZX1vLG1irfLajhrRgH//cWTGJ7R86G1kl21LFi07IBe3EmT8rjytMmcf+zYLsPyi3cv572dtfzr/GO4ePb4Ln//1Q0tfOqnL1FRG+vBDM9IZXxeJuPyhjEuL5OC4RnUBdeW21vfzLbKerZU1tMcPfBc8ZfPmspN5x/d8YHyseJt/J8n1hJtc8bkDOOTMwsZk5PJsk17WLVlL82tbcyfPY4ffe64jtfhxY27+IfH3mTPQT5spUWMIwuHc/TYHM6ZeQSfPWFcj69hdxRQcRRQIomTzF/Ubm1z9uxronBExgHBEG1t6/ZQbXf21jVTEvcVjZFZaUw/4sDDgvGaoq1EW53sHkKwrLqB3bXNTByVSW5mWo/njVrbnB1VDeypa6appZXGaBu5mWnMnph3QN3Nu+uob27l6LEj9ttuQ3MrZdUNTC3IPuD5qhtaeG9nbezDSlOU5tY2cjLTyA1uk0ZldRw2HCgFVBwFlIhI8uhtQGmYuYiIJCUFlIiIJCUFlIiIJCUFlIiIJKVQA8rM
5pnZRjMrMbObulieYWa/DZYvN7MpQfkVZrYm7tZmZrODZS8G22xfdkTn7YqIyEdfaAFlZhHgDuB8YBZwuZl1/tr09cBed58O3Ab8GMDdH3L32e4+G7gS2Ozua+LWu6J9ubtrUisRkSEozB7UXKDE3Te5ezPwKDC/U535wAPB/ceBc+3ALwNcDjwSYjtFRCQJhRlQ44FtcY9Lg7Iu67h7FKgG8jvVuYwDA+q+4PDeLV0EGgBmttDMis2suKIi3OsWiYhI4oUZUF0FR+dvBR+0jpmdAtS7+7q45Ve4+3HAWcHtyq6e3N0XuXuRuxcVFhb2reUiIjLowgyoUmBi3OMJwI7u6phZKpALVMYtX0Cn3pO7bw9+1gIPEzuUKCIiQ0yYAbUSmGFmU80snVjYLO5UZzFwdXD/EuB5D+ZeMrMU4FJi564IylLNrCC4nwZcCKxDRESGnMTagmYsAAAMFElEQVRcLasL7h41sxuAJUAEuNfd15vZrUCxuy8G7gEeNLMSYj2nBXGbOBsodfdNcWUZwJIgnCLAX4G7w9oHEREZPJosVkREDilNFisiIh9pCigREUlKCigREUlKCigREUlKCigREUlKCigREUlKCigREUlKCigREUlKCigREUlKCigREUlKCigREUlKCigREUlKCigREUlKCigREUlKCigREUlKCigREUlKCigREUlKCigREUlKCigREUlKCigREUlKoQaUmc0zs41mVmJmN3WxPMPMfhssX25mU4LyKWbWYGZrgtudcevMMbO1wTq3m5mFuQ8iIjI4QgsoM4sAdwDnA7OAy81sVqdq1wN73X06cBvw47hl77v77OD21bjyXwILgRnBbV5Y+yAiIoMnzB7UXKDE3Te5ezPwKDC/U535wAPB/ceBcw/WIzKzsUCOuy91dwd+DVyc+KaLiMhgCzOgxgPb4h6XBmVd1nH3KFAN5AfLpprZG2b2kpmdFVe/tIdtAmBmC82s2MyKKyoqBrYnIiJyyIUZUF31hLyXdcqASe5+InAj8LCZ5fRym7FC90XuXuTuRYWFhX1otoiIJIMwA6oUmBj3eAKwo7s6ZpYK5AKV7t7k7nsA3H0V8D5wVFB/Qg/bFBGRISDMgFoJzDCzqWaWDiwAFneqsxi4Orh/CfC8u7uZFQaDLDCzacQGQ2xy9zKg1sxODc5VXQU8GeI+iIjIIEkNa8PuHjWzG4AlQAS4193Xm9mtQLG7LwbuAR40sxKgkliIAZwN3GpmUaAV+Kq7VwbLvgbcD2QCfwluIiIyxFhsMNzQVlRU5MXFxYPdDBERAcxslbsX9VRPM0mIiEhSUkCJiEhSUkCJiEhSUkCJiEhSUkCJiEhSUkCJiEhSUkCJiEhSUkCJiEhSUkCJiEhSUkCJiEhSUkCJiEhSUkCJiEhSUkCJiEhSUkCJiEhSUkCJiEhSUkCJiEhSUkCJiEhSUkCJiEhSUkCJiEhSUkCJiEhSCjWgzGyemW00sxIzu6mL5Rlm9ttg+XIzmxKUn2dmq8xsbfDznLh1Xgy2uSa4HRHmPoiIyOBIDWvDZhYB7gDOA0qBlWa22N03xFW7Htjr7tPNbAHwY+AyYDfwWXffYWbHAkuA8XHrXeHuxWG1XUREBl+YPai5QIm7b3L3ZuBRYH6nOvOBB4L7jwPnmpm5+xvuviMoXw8MM7OMENsqIiJJJsyAGg9si3tcyv69oP3quHsUqAbyO9X5PPCGuzfFld0XHN67xcysqyc3s4VmVmxmxRUVFQPZDxERGQRhBlRXweF9qWNmxxA77PeVuOVXuPtxwFnB7cquntzdF7l7kbsXFRYW9qnhIiIy+MIMqFJgYtzjCcCO7uqYWSqQC1QGjycAfwCucvf321dw9+3Bz1rgYWKHEkVEZIgJM6BWAjPMbKqZpQMLgMWd6iwGrg7uXwI87+5uZnnAU8DN7v5ae2UzSzWzguB+GnAhsC7EfRARkUESWkAF55RuIDYC723gMXdfb2a3mtlFQbV7gHwzKwFuBNqHot8ATAdu6TScPANYYmZvAWuA7cDdYe2DiIgMHnPvfFpo6CkqKvLiYo1KFxFJBma2yt2LeqqnmSRERCQpKaBERCQpKaBERCQpKaBERCQpKaBERCQpKaBERCQpKaBERCQpKaBERCQpKaBERCQpKaBERCQpKaBERCQpKaBERCQpKaBERCQpKaBERCQpKaBERCQpKaBERCQpKaBERCQpKaBERCQpKaBERCQpKaBERCQphRpQZjbPzDaaWYmZ3dTF8gwz+22wfLmZTYlbdnNQvtHMPtPbbYqIyNAQWkCZWQS4AzgfmAVcbmazOlW7Htjr7tOB24AfB+vOAhYAxwDzgF+YWaSX2xQRkSEgzB7UXKDE3Te5ezPwKDC/U535wAPB/ceBc83MgvJH3b3J3T8ASoLt9WabIiIyBKSGuO3xwLa4x6XAKd3VcfeomVUD+UH5sk7rjg/u97RNAMxsIbAweLjPzDb2sf0FwO4+rjPU6DWI0esQo9dBr0G7gb4Ok3tTKcyAsi7KvJd1uivvqsfXeZuxQvdFwKKDNfBgzKzY3Yv6u/5QoNcgRq9DjF4HvQbtDtXrEOYhvlJgYtzjCcCO7uqYWSqQC1QeZN3ebFNERIaAMANqJTDDzKaaWTqxQQ+LO9VZDFwd3L8EeN7dPShfEIzymwrMAFb0cpsiIjIEhHaILzindAOwBIgA97r7ejO7FSh298XAPcCDZlZCrOe0IFh3vZk9BmwAosA33L0VoKtthrQL/T48OIToNYjR6xCj10GvQbtD8jpYrMMiIiKSXDSThIiIJCUFlIiIJCUFVCeH61RKZjbRzF4ws7fNbL2ZfSsoH2Vmz5rZe8HPkYPd1rAFs5a8YWZ/Dh5PDabiei+Ymit9sNsYNjPLM7PHzeyd4D1x2mH6Xvj74O9hnZk9YmbDDof3g5nda2a7zGxdXFmXv3+LuT34n/mWmZ2UqHYooOIc5lMpRYF/cPejgVOBbwT7fhPwnLvPAJ4LHg913wLejnv8Y+C24DXYS2yKrqHuv4Bn3H0mcAKx1+Owei+Y2Xjgm0CRux9LbGDWAg6P98P9xKaZi9fd7/98YiOtZxCbHOGXiWqEAmp/h+1USu5e5u6rg/u1xP4hjWf/6ageAC4enBYeGmY2AbgA+FXw2IBziE3FBYfHa5ADnE1slC3u3uzuVRxm74VAKpAZfE8zCyjjMHg/uPvLxEZWx+vu9z8f+LXHLAPyzGxsItqhgNpfV9Mzje+m7pAVzCp/IrAcGO3uZRALMeCIwWvZIfEz4DtAW/A4H6hy92jw+HB4T0wDKoD7gkOdvzKzbA6z94K7bwf+A9hKLJiqgVUcfu+Hdt39/kP7v6mA2l9vpmca0sxsOPB74O/cvWaw23MomdmFwC53XxVf3EXVof6eSAVOAn7p7icCdQzxw3ldCc6xzAemAuOAbGKHszob6u+HnoT2N6KA2t9hPZWSmaURC6eH3P2JoHhne3c9+LlrsNp3CJwBXGRmm4kd3j2HWI8qLzjEA4fHe6IUKHX35cHjx4kF1uH0XgD4FPCBu1e
4ewvwBHA6h9/7oV13v//Q/m8qoPZ32E6lFJxruQd4291/Grcofjqqq4EnD3XbDhV3v9ndJ7j7FGK/++fd/QrgBWJTccEQfw0A3L0c2GZmHwuKziU2q8th814IbAVONbOs4O+j/XU4rN4Pcbr7/S8GrgpG850KVLcfChwozSTRiZn9DbFPze1TKf1okJt0SJjZmcArwFo+PP/yf4idh3oMmETsD/ZSd+988nTIMbNPAP/o7hea2TRiPapRwBvAl9y9aTDbFzYzm01soEg6sAm4ltgH2sPqvWBm/wpcRmyU6xvA/yJ2fmVIvx/M7BHgE8Quq7ET+D7wR7r4/Qfh/XNio/7qgWvdvTgh7VBAiYhIMtIhPhERSUoKKBERSUoKKBERSUoKKBERSUoKKBERSUoKKJFDxMxazWxN3C1hszOY2ZT4madFhoLQLvkuIgdocPfZg90IkY8K9aBEBpmZbTazH5vZiuA2PSifbGbPBdfYec7MJgXlo83sD2b2ZnA7PdhUxMzuDq5f9D9mlhnU/6aZbQi28+gg7aZInymgRA6dzE6H+C6LW1bj7nOJfSP/Z0HZz4ldxuB44CHg9qD8duAldz+B2Bx564PyGcAd7n4MUAV8Pii/CTgx2M5Xw9o5kUTTTBIih4iZ7XP34V2UbwbOcfdNwYS95e6eb2a7gbHu3hKUl7l7gZlVABPip9cJLpHybHAxOczsn4A0d/+hmT0D7CM2Vc0f3X1fyLsqkhDqQYkkB+/mfnd1uhI/H1wrH55jvoDYlaLnAKviZuIWSWoKKJHkcFncz6XB/deJzaoOcAXwanD/OeBrAGYWCa6A2yUzSwEmuvsLxC7EmAcc0IsTSUb6JCVy6GSa2Zq4x8+4e/tQ8wwzW07sQ+PlQdk3gXvN7NvErnB7bVD+LWCRmV1PrKf0NWJXfO1KBPiNmeUSu7DcbcHl20WSns5BiQyy4BxUkbvvHuy2iCQTHeITEZGkpB6UiIgkJfWgREQkKSmgREQkKSmgREQkKSmgREQkKSmgREQkKf1/r6EdopNRh/0AAAAASUVORK5CYII=\n", "text/plain": [ - "" + "
    " ] }, - "metadata": {}, + "metadata": { + "needs_background": "light" + }, "output_type": "display_data" } ], @@ -446,7 +449,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 9, "metadata": {}, "outputs": [ { @@ -543,6 +546,30 @@ "\n", "
    \n", "\n", + "*get_params(deep=True)*\n", + "\n", + "Get parameters for this estimator.\n", + "\n", + "**Parameters**\n", + "\n", + "- `deep` : boolean, optional\n", + "\n", + " If True, will return the parameters for this estimator and\n", + " contained subobjects that are estimators.\n", + "\n", + "**Returns**\n", + "\n", + "- `params` : mapping of string to any\n", + "\n", + " Parameter names mapped to their values.'\n", + "\n", + " adapted from\n", + " https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py\n", + " # Author: Gael Varoquaux \n", + " # License: BSD 3 clause\n", + "\n", + "
    \n", + "\n", "*predict(X)*\n", "\n", "Predict targets from X.\n", @@ -560,6 +587,25 @@ "\n", " Predicted target values.\n", "\n", + "
    \n", + "\n", + "*set_params(**params)*\n", + "\n", + "Set the parameters of this estimator.\n", + "The method works on simple estimators as well as on nested objects\n", + "(such as pipelines). The latter have parameters of the form\n", + "``__`` so that it's possible to update each\n", + "component of a nested object.\n", + "\n", + "**Returns**\n", + "\n", + "self\n", + "\n", + "adapted from\n", + "https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py\n", + "# Author: Gael Varoquaux \n", + "# License: BSD 3 clause\n", + "\n", "\n" ] } @@ -587,7 +633,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.4" + "version": "3.6.7" }, "toc": { "nav_menu": {}, diff --git a/docs/_site/site/user_guide/regressor/LinearRegression/index.html b/docs/_site/site/user_guide/regressor/LinearRegression/index.html index 1884eba50..4d6142e23 100644 --- a/docs/_site/site/user_guide/regressor/LinearRegression/index.html +++ b/docs/_site/site/user_guide/regressor/LinearRegression/index.html @@ -805,6 +805,12 @@ +
• + Mlxtend.image
• Mlxtend.plotting
• @@ -897,6 +903,8 @@ Search
• - Edit on GitHub + GitHub
• @@ -922,21 +930,25 @@
    @@ -1020,7 +1032,7 @@

    Example 1 - Closed Form Solution

    Intercept: 0.25
     Slope: 0.81
     
    -

    png

    +

    png
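For readers who want to check the closed-form numbers by hand, here is a minimal normal-equations sketch in plain NumPy. The data below is a stand-in, not the dataset used in the example above, so its fitted values (0.20 and 0.80) intentionally differ from the 0.25 and 0.81 printed there:

import numpy as np

# stand-in data for illustration only
X = np.array([1.0, 2.0, 3.0, 4.0, 5.0])[:, np.newaxis]
y = np.array([1.0, 2.0, 3.0, 3.0, 4.0])

# append a bias column and solve (Xb^T Xb) w = Xb^T y
Xb = np.hstack((np.ones((X.shape[0], 1)), X))
w = np.linalg.solve(Xb.T.dot(Xb), Xb.T.dot(y))
print('Intercept: %.2f' % w[0])  # 0.20 for this stand-in data
print('Slope: %.2f' % w[1])      # 0.80 for this stand-in data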

    Example 2 - Gradient Descent

    import numpy as np
     import matplotlib.pyplot as plt
    @@ -1036,8 +1048,8 @@ 

    Example 2 - Gradient Descent

    print_progress=3) gd_lr.fit(X, y) -print('Intercept: %.2f' % gd_lr.w_) -print('Slope: %.2f' % gd_lr.b_) +print('Intercept: %.2f' % gd_lr.b_) +print('Slope: %.2f' % gd_lr.w_) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') @@ -1048,12 +1060,12 @@

    Example 2 - Gradient Descent

    plt.show()
    -
    Iteration: 100/100 | Cost 0.08 | Elapsed: 0:00:00 | ETA: 0:00:00
    +
    Iteration: 100/100 | Cost 0.08 | Elapsed: 0:00:00 | ETA: 0:00:000
     
    -Intercept: 0.82
    -Slope: 0.22
    +Intercept: 0.22
    +Slope: 0.82
     
    -

    png

    +

    png
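The corrected print statements above reflect the attribute naming used throughout this page: after fitting, the slope weights are stored in w_ and the intercept in b_. A quick sanity check, assuming the fitted gd_lr from the code above:

# w_ holds the model weights (slope), b_ the bias unit (intercept)
print('Slope (w_):', gd_lr.w_.flatten())
print('Intercept (b_):', gd_lr.b_)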

    # Visualizing the cost to check for convergence and plotting the linear model:
     
     plt.plot(range(1, gd_lr.epochs+1), gd_lr.cost_)
    @@ -1064,7 +1076,7 @@ 

    Example 2 - Gradient Descent

    plt.show()
    -

    png

    +

    png
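For intuition, this is roughly the update that the gradient-descent fit performs on each epoch; it is a simplified sketch of the standard squared-error update rule, not a verbatim copy of mlxtend's internals:

import numpy as np

def gd_step(X, y, w, b, eta):
    # residuals under the current parameters
    errors = y - (X.dot(w) + b)
    # gradient step on weights and bias for the squared-error cost
    w = w + eta * X.T.dot(errors)
    b = b + eta * errors.sum()
    cost = (errors ** 2).sum() / 2.0
    return w, b, cost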

    Example 3 - Stochastic Gradient Descent

    import numpy as np
     import matplotlib.pyplot as plt
    @@ -1079,8 +1091,8 @@ 

Example 3 - Stochastic Gradient D minibatches=len(y)) sgd_lr.fit(X, y) print('Intercept: %.2f' % sgd_lr.b_) print('Slope: %.2f' % sgd_lr.w_) def lin_regplot(X, y, model): plt.scatter(X, y, c='blue') @@ -1091,10 +1103,10 @@

    Example 3 - Stochastic Gradient D plt.show()

Intercept: 0.24
 Slope: 0.82
 
    -

    png

    +

    png

    plt.plot(range(1, sgd_lr.epochs+1), sgd_lr.cost_)
     plt.xlabel('Epochs')
     plt.ylabel('Cost')
    @@ -1103,7 +1115,7 @@ 

    Example 3 - Stochastic Gradient D plt.show()

    -

    png

    +

    png
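Comparing the fits above, the only setting that changes between the variants is the minibatches argument. The mapping below is inferred from these examples rather than from the parameter docs, which are elided in this diff:

# minibatches=None         -> normal equations (closed form), as in Example 1
# minibatches=1            -> batch gradient descent, as in Example 2
# minibatches=len(y)       -> stochastic gradient descent, as in Example 3
# 1 < minibatches < len(y) -> SGD on minibatches, as in the next example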

    Example 3 - Stochastic Gradient Descent with Minibatches

    import numpy as np
     import matplotlib.pyplot as plt
    @@ -1133,7 +1145,7 @@ 

    Example 3 - Stoc
    Intercept: 0.24
     Slope: 0.82
     
    -

    png

    +

    png

    plt.plot(range(1, sgd_lr.epochs+1), sgd_lr.cost_)
     plt.xlabel('Epochs')
     plt.ylabel('Cost')
    @@ -1142,7 +1154,7 @@ 

    Example 3 - Stoc plt.show()

    -

    png

    +

    png

    API

    LinearRegression(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0)

    Ordinary least squares linear regression.
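
As the three examples above suggest, the `minibatches` parameter selects the solver. A minimal sketch (the mapping below is inferred from the examples on this page, and the toy data and learning rates are illustrative):

import numpy as np
from mlxtend.regressor import LinearRegression

X = np.array([1.0, 2.1, 3.6, 4.2, 6.0])[:, np.newaxis]  # illustrative data
y = np.array([1.0, 2.0, 3.0, 4.0, 5.0])

ne_lr = LinearRegression(minibatches=None)                           # closed-form solution, cf. Example 1
gd_lr = LinearRegression(eta=0.001, epochs=100, minibatches=1)       # gradient descent, cf. Example 2
sgd_lr = LinearRegression(eta=0.01, epochs=100, minibatches=len(y))  # stochastic gradient descent, cf. Example 3

ne_lr.fit(X, y)
print('Intercept: %.2f' % ne_lr.b_)
print('Slope: %.2f' % ne_lr.w_)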

    @@ -1227,6 +1239,29 @@

    Methods


    +

    get_params(deep=True)

    +

    Get parameters for this estimator.

    +

    Parameters

    +
      +
    • +

      deep : boolean, optional

      +

      If True, will return the parameters for this estimator and +contained subobjects that are estimators.

      +
    • +
    +

    Returns

    +
      +
    • +

      params : mapping of string to any

      +

Parameter names mapped to their values.

      +

      adapted from +https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py

      +

      Author: Gael Varoquaux gael.varoquaux@normalesup.org

      +

      License: BSD 3 clause

      +
    • +
    +
    +

    predict(X)

    Predict targets from X.

    Parameters

    @@ -1243,7 +1278,21 @@

    Methods

    target_values : array-like, shape = [n_samples]

    Predicted target values.

    -
    + +
    + +

set_params(**params)

    +

    Set the parameters of this estimator. +The method works on simple estimators as well as on nested objects +(such as pipelines). The latter have parameters of the form +<component>__<parameter> so that it's possible to update each +component of a nested object.

    +

    Returns

    +

    self

    +

    adapted from +https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py

    +

    Author: Gael Varoquaux gael.varoquaux@normalesup.org

    +

    License: BSD 3 clause

    @@ -1251,7 +1300,7 @@

    Methods
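
Since `get_params` and `set_params` follow the scikit-learn estimator API, they can be used as in the following minimal sketch (parameter values are illustrative):

from mlxtend.regressor import LinearRegression

model = LinearRegression(eta=0.01, epochs=50)
print(model.get_params())                # e.g., {'epochs': 50, 'eta': 0.01, ...}

model.set_params(eta=0.001, epochs=100)  # returns self
# For nested objects such as pipelines, use the <component>__<parameter>
# form, e.g., pipe.set_params(linearregression__eta=0.001)  (hypothetical pipeline)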

diff --git a/docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_13_1.png b/docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_13_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..6f19f0c7ea16ac2f06210a9d69e40bee69c6ab30
GIT binary patch (literal 7168; binary PNG data omitted)

diff --git a/docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_15_2.png b/docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_15_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..ebc65b0b2acff0437632a1f41350a3efa1014743
GIT binary patch (literal 7125; binary PNG data omitted)

diff --git a/docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_18_1.png b/docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_18_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..a8c2cc341f10aa9b4e9116dc809ddc6481673b65
GIT binary patch (literal 7093; binary PNG data omitted)

diff --git a/docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_19_0.png b/docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_19_0.png
new file mode 100644
index 0000000000000000000000000000000000000000..50af5b086cb4609ba2048c05359b25bdad72c873
GIT binary patch (literal 10778; binary PNG data omitted)

diff --git a/docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_21_1.png b/docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_21_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..c731f7e0d7e47437e9a16bfc8e9667e4b9556d11
GIT binary patch (literal 7136; binary PNG data omitted)

diff --git a/docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_22_0.png b/docs/_site/site/user_guide/regressor/LinearRegression_files/LinearRegression_22_0.png
new file mode 100644
index 0000000000000000000000000000000000000000..73a6c4d8e6f3f322650562fd184c038771e007f6
GIT binary patch (literal 11459; binary PNG data omitted)

diff --git a/docs/_site/site/user_guide/regressor/StackingCVRegressor/index.html b/docs/_site/site/user_guide/regressor/StackingCVRegressor/index.html
index 7bb7624be..6256cda81 100644
--- a/docs/_site/site/user_guide/regressor/StackingCVRegressor/index.html
+++ b/docs/_site/site/user_guide/regressor/StackingCVRegressor/index.html
@@ -805,6 +805,12 @@
+
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,15 +930,15 @@
    @@ -1347,7 +1355,7 @@

    Methods

    diff --git a/docs/_site/site/user_guide/regressor/StackingRegressor/index.html b/docs/_site/site/user_guide/regressor/StackingRegressor/index.html index 05e470da7..c692c853e 100644 --- a/docs/_site/site/user_guide/regressor/StackingRegressor/index.html +++ b/docs/_site/site/user_guide/regressor/StackingRegressor/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,16 +930,16 @@
    @@ -1627,7 +1635,7 @@

    Properties

    diff --git a/docs/_site/site/user_guide/text/generalize_names/index.html b/docs/_site/site/user_guide/text/generalize_names/index.html index 83f7f4819..70c4cef46 100644 --- a/docs/_site/site/user_guide/text/generalize_names/index.html +++ b/docs/_site/site/user_guide/text/generalize_names/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,14 +930,14 @@
    @@ -1020,7 +1028,7 @@

    API

    diff --git a/docs/_site/site/user_guide/text/generalize_names_duplcheck.ipynb b/docs/_site/site/user_guide/text/generalize_names_duplcheck.ipynb index b43f3b67b..4f72e2401 100644 --- a/docs/_site/site/user_guide/text/generalize_names_duplcheck.ipynb +++ b/docs/_site/site/user_guide/text/generalize_names_duplcheck.ipynb @@ -32,7 +32,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "**Note** that using [`mlxtend.text.generalize_names`](./generalize_named.html) with few `firstname_output_letters` can result in duplicate entries. E.g., if your dataset contains the names \"Adam Johnson\" and \"Andrew Johnson\", the default setting (i.e., 1 first name letter) will produce the generalized name \"johnson a\" in both cases.\n", + "**Note** that using [`mlxtend.text.generalize_names`](./generalize_names.md) with few `firstname_output_letters` can result in duplicate entries. E.g., if your dataset contains the names \"Adam Johnson\" and \"Andrew Johnson\", the default setting (i.e., 1 first name letter) will produce the generalized name \"johnson a\" in both cases.\n", "\n", "One solution is to increase the number of first name letters in the output by setting the parameter `firstname_output_letters` to a value larger than 1. \n", "\n", @@ -270,7 +270,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.4" + "version": "3.6.7" }, "toc": { "nav_menu": {}, diff --git a/docs/_site/site/user_guide/text/generalize_names_duplcheck/index.html b/docs/_site/site/user_guide/text/generalize_names_duplcheck/index.html index 545f70ca7..c4ae097ad 100644 --- a/docs/_site/site/user_guide/text/generalize_names_duplcheck/index.html +++ b/docs/_site/site/user_guide/text/generalize_names_duplcheck/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,12 +930,12 @@
    @@ -938,7 +946,7 @@

    Generalize Names & Duplicate Ch

    from mlxtend.text import generalize_names_duplcheck

    Overview

    -

    Note that using mlxtend.text.generalize_names with few firstname_output_letters can result in duplicate entries. E.g., if your dataset contains the names "Adam Johnson" and "Andrew Johnson", the default setting (i.e., 1 first name letter) will produce the generalized name "johnson a" in both cases.

    +

    Note that using mlxtend.text.generalize_names with few firstname_output_letters can result in duplicate entries. E.g., if your dataset contains the names "Adam Johnson" and "Andrew Johnson", the default setting (i.e., 1 first name letter) will produce the generalized name "johnson a" in both cases.

    One solution is to increase the number of first name letters in the output by setting the parameter firstname_output_letters to a value larger than 1.

    An alternative solution is to use the generalize_names_duplcheck function if you are working with pandas DataFrames.

    By default, generalize_names_duplcheck will apply generalize_names to a pandas DataFrame column with the minimum number of first name letters and append as many first name letters as necessary until no duplicates are present in the given DataFrame column. An example dataset column that contains the names
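
A minimal sketch of this duplicate-checking behavior (the two-name DataFrame below is illustrative):

import pandas as pd
from mlxtend.text import generalize_names_duplcheck

df = pd.DataFrame({'name': ['Adam Johnson', 'Andrew Johnson']})
df_new = generalize_names_duplcheck(df=df, col_name='name')
# 'johnson a' would be ambiguous here, so first-name letters are appended
# until the entries are unique, e.g., 'johnson ad' and 'johnson an'
print(df_new)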

    @@ -1066,7 +1074,7 @@

    API

    diff --git a/docs/_site/site/user_guide/text/tokenizer/index.html b/docs/_site/site/user_guide/text/tokenizer/index.html index f7f0227fb..0a651ce1a 100644 --- a/docs/_site/site/user_guide/text/tokenizer/index.html +++ b/docs/_site/site/user_guide/text/tokenizer/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,14 +930,14 @@
    @@ -988,7 +996,7 @@

    API

    diff --git a/docs/_site/site/user_guide/utils/Counter/index.html b/docs/_site/site/user_guide/utils/Counter/index.html index aac0c2890..b058e2ddc 100644 --- a/docs/_site/site/user_guide/utils/Counter/index.html +++ b/docs/_site/site/user_guide/utils/Counter/index.html @@ -805,6 +805,12 @@ +
  • + Mlxtend.image +
  • + + +
  • Mlxtend.plotting
  • @@ -897,6 +903,8 @@ Search + +
  • - Edit on GitHub + GitHub
  • @@ -922,13 +930,13 @@
    @@ -1026,7 +1034,7 @@

    Methods

diff --git a/docs/_site/sources/CHANGELOG.md b/docs/_site/sources/CHANGELOG.md
index 907f9b284..f6b4e17be 100755
--- a/docs/_site/sources/CHANGELOG.md
+++ b/docs/_site/sources/CHANGELOG.md
@@ -7,6 +7,30 @@ The CHANGELOG for the current development version is available at
 
 ---
 
+### Version 0.15.0 (01-19-2019)
+
+##### Downloads
+
+- [Source code (zip)](https://github.com/rasbt/mlxtend/archive/v0.15.0.zip)
+- [Source code (tar.gz)](https://github.com/rasbt/mlxtend/archive/v0.15.0.tar.gz)
+
+##### New Features
+
+- Adds a new transformer class to `mlxtend.image`, `EyepadAlign`, that aligns face images based on the location of the eyes. ([#466](https://github.com/rasbt/mlxtend/pull/466) by [Vahid Mirjalili](https://github.com/vmirly))
+- Adds a new function, `mlxtend.evaluate.bias_variance_decomp`, that decomposes the loss of a regressor or classifier into bias and variance terms. ([#470](https://github.com/rasbt/mlxtend/pull/470))
+- Adds a `whitening` parameter to `PrincipalComponentAnalysis`, to optionally whiten the transformed data such that the features have unit variance. ([#475](https://github.com/rasbt/mlxtend/pull/475))
+
+##### Changes
+
+- Changed the default solver in `PrincipalComponentAnalysis` to `'svd'` instead of `'eigen'` to improve numerical stability. ([#474](https://github.com/rasbt/mlxtend/pull/474))
+- `mlxtend.image.extract_face_landmarks` now returns `None` instead of an array of all zeros if no facial landmarks were detected. ([#466](https://github.com/rasbt/mlxtend/pull/466))
+
+
+##### Bug Fixes
+
+- The eigenvectors may not have been sorted in certain edge cases if the solver was `'eigen'` in `PrincipalComponentAnalysis` and `LinearDiscriminantAnalysis`. ([#477](https://github.com/rasbt/mlxtend/pull/477), [#478](https://github.com/rasbt/mlxtend/pull/478))
+
+
 ### Version 0.14.0 (11-09-2018)
 
 ##### Downloads
diff --git a/docs/_site/sources/USER_GUIDE_INDEX.md b/docs/_site/sources/USER_GUIDE_INDEX.md
index d44b02463..111acb8e1 100755
--- a/docs/_site/sources/USER_GUIDE_INDEX.md
+++ b/docs/_site/sources/USER_GUIDE_INDEX.md
@@ -24,6 +24,7 @@
 - [wine_data](user_guide/data/wine_data.md)
 
 ## `evaluate`
+- [bias_variance_decomp](user_guide/evaluate/bias_variance_decomp.md)
 - [bootstrap](user_guide/evaluate/bootstrap.md)
 - [bootstrap_point632_score](user_guide/evaluate/bootstrap_point632_score.md)
 - [BootstrapOutOfBag](user_guide/evaluate/BootstrapOutOfBag.md)
diff --git a/docs/_site/sources/api_modules/mlxtend.evaluate/bias_variance_decomp.md b/docs/_site/sources/api_modules/mlxtend.evaluate/bias_variance_decomp.md
new file mode 100644
index 000000000..29ae3def7
--- /dev/null
+++ b/docs/_site/sources/api_modules/mlxtend.evaluate/bias_variance_decomp.md
@@ -0,0 +1,57 @@
+## bias_variance_decomp
+
+*bias_variance_decomp(estimator, X_train, y_train, X_test, y_test, loss='0-1_loss', num_rounds=200, random_seed=None)*
+
+**Parameters**
+
+- `estimator` : object
+
+    A classifier or regressor object or class implementing a `fit`
+    and `predict` method similar to the scikit-learn API.
+
+
+- `X_train` : array-like, shape=(num_examples, num_features)
+
+    A training dataset for drawing the bootstrap samples to carry
+    out the bias-variance decomposition.
+
+
+- `y_train` : array-like, shape=(num_examples)
+
+    Targets (class labels, continuous values in case of regression)
+    associated with the `X_train` examples.
+
+
+- `X_test` : array-like, shape=(num_examples, num_features)
+
+    The test dataset for computing the average loss, bias,
+    and variance.
+
+
+- `y_test` : array-like, shape=(num_examples)
+
+    Targets (class labels, continuous values in case of regression)
+    associated with the `X_test` examples.
+
+
+- `loss` : str (default='0-1_loss')
+
+    Loss function for performing the bias-variance decomposition.
+    Currently allowed values are '0-1_loss' and 'mse'.
+
+
+- `num_rounds` : int (default=200)
+
+    Number of bootstrap rounds for performing the bias-variance
+    decomposition.
+
+
+- `random_seed` : int (default=None)
+
+    Random seed for the bootstrap sampling used for the
+    bias-variance decomposition.
+
+**Returns**
+
+- `avg_expected_loss, avg_bias, avg_var` : returns the average expected
+
+    loss, average bias, and average variance (all floats), where the
+    average is computed over the data points in the test set.
+
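
A minimal usage sketch of the function documented above (the decision-tree regressor and the train/test split are illustrative; scikit-learn is assumed to be installed):

from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
from mlxtend.evaluate import bias_variance_decomp
from mlxtend.data import boston_housing_data

X, y = boston_housing_data()
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=123)

avg_loss, avg_bias, avg_var = bias_variance_decomp(
    DecisionTreeRegressor(random_state=123),
    X_train, y_train, X_test, y_test,
    loss='mse', num_rounds=200, random_seed=1)

print('Average expected loss: %.3f' % avg_loss)
print('Average bias: %.3f' % avg_bias)
print('Average variance: %.3f' % avg_var)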
diff --git a/docs/_site/sources/api_modules/mlxtend.image/EyepadAlign.md b/docs/_site/sources/api_modules/mlxtend.image/EyepadAlign.md
new file mode 100644
index 000000000..3e8e36c63
--- /dev/null
+++ b/docs/_site/sources/api_modules/mlxtend.image/EyepadAlign.md
@@ -0,0 +1,168 @@
+## EyepadAlign
+
+*EyepadAlign(verbose=0)*
+
+Class to align/transform face images to facial landmarks,
+based on eye alignment.
+
+1. A scaling factor is computed based on the distance between the
+left and right eye, such that the transformed face image will
+have the same eye distance as a reference face image.
+
+2. A transformation is performed based on the eyes' center point
+to align the face with the reference eye location.
+
+3. Finally, the transformed image is padded with zeros to match
+the desired final image size.
+
+**Parameters**
+
+- `verbose` : int (default=0)
+
+    Verbose level to display the progress bar and log messages.
+    Setting `verbose=1` will print a progress bar upon calling
+    `fit_directory`.
+
+**Attributes**
+
+- `target_landmarks_` : target landmarks to transform new face images to.
+
+    Depending on the chosen `fit` parameters, it can be either
+    (1) assigned to pre-fit shapes,
+    (2) computed from a single face image, or
+    (3) computed as the mean of face landmarks
+    from all face images in a file directory of face images.
+
+
+- `eye_distance_` : the distance between the left and right eyes
+
+    in the target landmarks.
+
+
+- `target_height_` : the height of the transformed output image.
+
+
+- `target_width_` : the width of the transformed output image.
+
+
+For more usage examples, please see
+[http://rasbt.github.io/mlxtend/user_guide/image/EyepadAlign/](http://rasbt.github.io/mlxtend/user_guide/image/EyepadAlign/)
+
+**Returns**
+
+- `self` : object
+
+
+### Methods
+
+
+*fit_directory(target_img_dir, target_height, target_width, file_extension='.jpg', pre_check=True)*
+
+Calculates the average landmarks for all face images
+in a directory, which will then be set as the target landmark set.
+
+**Arguments**
+
+- `target_img_dir` : str
+
+    Directory containing the images
+
+
+- `target_height` : int
+
+    Expected image height of the images in the directory
+
+
+- `target_width` : int
+
+    Expected image width of the images in the directory
+
+
+- `file_extension` : str (default='.jpg')
+
+    File extension of the image files.
+
+
+- `pre_check` : bool (default=True)
+
+    Checks that each image in the directory has the dimensions
+    specified via `target_height` and `target_width` before fitting,
+    to identify potential issues that should be fixed before
+    proceeding. Raises a warning for each image whose dimensions
+    differ from the specified, expected ones.
+
+**Returns**
+
+- `self` : object
+
+
+
+*fit_image(target_image)*
+
+Derives facial landmarks from a target image.
+
+**Arguments**
+
+- `target_image` : `uint8` numpy.array, shape=[width, height, channels]
+
+    NumPy array representation of the image data.
+
+**Returns**
+
+- `self` : object
+
+
+
+*fit_values(target_landmarks, target_width, target_height)*
+
+Used for determining the eye location from pre-defined
+landmark arrays, eliminating the need for re-computing
+the average landmarks on a target image or image directory.
+
+**Arguments**
+
+- `target_landmarks` : np.array, shape=(height, width)
+
+    NumPy array containing the locations of the facial landmarks
+    as determined by `mlxtend.image.extract_face_landmarks`
+
+
+- `target_height` : int
+
+    image height
+
+
+- `target_width` : int
+
+    image width
+
+**Returns**
+
+- `self` : object
+
+
+
+*transform(img)*
+
+Transforms a single face image (img) to the target landmarks
+based on the location of the eyes by
+scaling, translation, and cropping (if needed):
+
+(1) Scaling the image so that the distance of the two eyes
+in the given image (img) matches the distance of the
+two eyes in the target landmarks.
+
+(2) Translation is performed based on the middle point
+between the two eyes.
+
+**Arguments**
+
+- `img` : np.array, shape=(height, width, channels)
+
+    Input image to be transformed.
+
+**Returns**
+
+- `img_tr` : np.array
+
+    The transformed (aligned and padded) face image.
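Putting the methods above together, a hedged end-to-end sketch; the directory path, image path, the CelebA-like 218x178 target size, and the `imageio`-based loading are illustrative assumptions:

```python
# Sketch of the EyepadAlign workflow documented above. The paths,
# target size, and imageio loader are illustrative assumptions; any
# uint8 RGB image arrays should work.
import imageio
from mlxtend.image import EyepadAlign

eyepad = EyepadAlign(verbose=1)

# Fit the target landmarks as the average over a directory of
# equally sized face images (see fit_directory above) ...
eyepad.fit_directory(target_img_dir='face-images/',
                     target_height=218, target_width=178,
                     file_extension='.jpg')

# ... then align a new face image to those target landmarks.
img = imageio.imread('new-face.jpg')
img_aligned = eyepad.transform(img)
```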
diff --git a/docs/_site/sources/api_modules/mlxtend.image/extract_face_landmarks.md b/docs/_site/sources/api_modules/mlxtend.image/extract_face_landmarks.md
index ecd75152c..21a8c5d93 100644
--- a/docs/_site/sources/api_modules/mlxtend.image/extract_face_landmarks.md
+++ b/docs/_site/sources/api_modules/mlxtend.image/extract_face_landmarks.md
@@ -26,6 +26,7 @@ the Python version of the library "dlib": http://dlib.net
- `landmarks` : numpy.ndarray, shape = [68, 2]

    A numpy array in which each row contains the x-y coordinates of a landmark/point.
+    Returns `None` if no face is detected by Dlib.

**Examples**
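Since existing code may still assume the old all-zeros return value, a short defensive sketch; the image path and the `imageio` loader are illustrative assumptions:

```python
# Guarding against the new None return value of extract_face_landmarks.
# The image path and imageio loader are placeholders; dlib and its
# shape-predictor model must be installed for this to run.
import imageio
from mlxtend.image import extract_face_landmarks

img = imageio.imread('some-photo.jpg')
landmarks = extract_face_landmarks(img)

if landmarks is None:
    print('No face detected by dlib; skipping this image.')
else:
    print(landmarks.shape)  # (68, 2): one x-y row per landmark
```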
diff --git a/docs/_site/sources/api_subpackages/mlxtend._base.md b/docs/_site/sources/api_subpackages/mlxtend._base.md
index 56fde836d..30ed82a61 100644
--- a/docs/_site/sources/api_subpackages/mlxtend._base.md
+++ b/docs/_site/sources/api_subpackages/mlxtend._base.md
@@ -1 +1 @@
-mlxtend version: 0.14.0dev
+mlxtend version: 0.15.0dev

diff --git a/docs/_site/sources/api_subpackages/mlxtend.classifier.md b/docs/_site/sources/api_subpackages/mlxtend.classifier.md
index 5ca0c8f6d..9261c1ee8 100644
--- a/docs/_site/sources/api_subpackages/mlxtend.classifier.md
+++ b/docs/_site/sources/api_subpackages/mlxtend.classifier.md
@@ -1,4 +1,4 @@
-mlxtend version: 0.14.0dev
+mlxtend version: 0.15.0dev

## Adaline

*Adaline(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0)*

diff --git a/docs/_site/sources/api_subpackages/mlxtend.cluster.md b/docs/_site/sources/api_subpackages/mlxtend.cluster.md
index 0b3472452..4f2103af4 100644
--- a/docs/_site/sources/api_subpackages/mlxtend.cluster.md
+++ b/docs/_site/sources/api_subpackages/mlxtend.cluster.md
@@ -1,4 +1,4 @@
-mlxtend version: 0.14.0dev
+mlxtend version: 0.15.0dev

## Kmeans

*Kmeans(k, max_iter=10, convergence_tolerance=1e-05, random_seed=None, print_progress=0)*

diff --git a/docs/_site/sources/api_subpackages/mlxtend.data.md b/docs/_site/sources/api_subpackages/mlxtend.data.md
index 6191013c9..aaa60bd3b 100644
--- a/docs/_site/sources/api_subpackages/mlxtend.data.md
+++ b/docs/_site/sources/api_subpackages/mlxtend.data.md
@@ -1,4 +1,4 @@
-mlxtend version: 0.14.0dev
+mlxtend version: 0.15.0dev

## autompg_data

*autompg_data()*

diff --git a/docs/_site/sources/api_subpackages/mlxtend.evaluate.md b/docs/_site/sources/api_subpackages/mlxtend.evaluate.md
index 171145a1a..30a79f61f 100644
--- a/docs/_site/sources/api_subpackages/mlxtend.evaluate.md
+++ b/docs/_site/sources/api_subpackages/mlxtend.evaluate.md
@@ -1,4 +1,4 @@
-mlxtend version: 0.14.0dev
+mlxtend version: 0.15.0dev

## BootstrapOutOfBag

*BootstrapOutOfBag(n_splits=200, random_seed=None)*

@@ -266,6 +266,66 @@ Generate indices to split data into training and test set.

+## bias_variance_decomp
+
+*bias_variance_decomp(estimator, X_train, y_train, X_test, y_test, loss='0-1_loss', num_rounds=200, random_seed=None)*
+
+**Parameters**
+
+- `estimator` : object
+
+    A classifier or regressor object or class implementing both a `fit`
+    and a `predict` method, similar to the scikit-learn API.
+
+
+- `X_train` : array-like, shape=(num_examples, num_features)
+
+    A training dataset for drawing the bootstrap samples to carry
+    out the bias-variance decomposition.
+
+
+- `y_train` : array-like, shape=(num_examples)
+
+    Targets (class labels, continuous values in case of regression)
+    associated with the `X_train` examples.
+
+
+- `X_test` : array-like, shape=(num_examples, num_features)
+
+    The test dataset for computing the average loss, bias,
+    and variance.
+
+
+- `y_test` : array-like, shape=(num_examples)
+
+    Targets (class labels, continuous values in case of regression)
+    associated with the `X_test` examples.
+
+
+- `loss` : str (default='0-1_loss')
+
+    Loss function for performing the bias-variance decomposition.
+    Currently allowed values are '0-1_loss' and 'mse'.
+
+
+- `num_rounds` : int (default=200)
+
+    Number of bootstrap rounds for performing the bias-variance
+    decomposition.
+
+
+- `random_seed` : int (default=None)
+
+    Random seed for the bootstrap sampling used for the
+    bias-variance decomposition.
+
+**Returns**
+
+- `avg_expected_loss, avg_bias, avg_var` : floats
+
+    The average expected loss, the average bias, and the average
+    variance, where each average is computed over the data points
+    in the test set.
+
+
+
## bootstrap

*bootstrap(x, func, num_rounds=1000, ci=0.95, ddof=1, seed=None)*

diff --git a/docs/_site/sources/api_subpackages/mlxtend.externals.md b/docs/_site/sources/api_subpackages/mlxtend.externals.md
index 56fde836d..30ed82a61 100644
--- a/docs/_site/sources/api_subpackages/mlxtend.externals.md
+++ b/docs/_site/sources/api_subpackages/mlxtend.externals.md
@@ -1 +1 @@
-mlxtend version: 0.14.0dev
+mlxtend version: 0.15.0dev

diff --git a/docs/_site/sources/api_subpackages/mlxtend.feature_extraction.md b/docs/_site/sources/api_subpackages/mlxtend.feature_extraction.md
index 591ea0357..e8c351d28 100644
--- a/docs/_site/sources/api_subpackages/mlxtend.feature_extraction.md
+++ b/docs/_site/sources/api_subpackages/mlxtend.feature_extraction.md
@@ -1,4 +1,4 @@
-mlxtend version: 0.14.0dev
+mlxtend version: 0.15.0dev

## LinearDiscriminantAnalysis

*LinearDiscriminantAnalysis(n_discriminants=None)*

@@ -128,7 +128,7 @@ Apply the linear transformation on X.

## PrincipalComponentAnalysis

-*PrincipalComponentAnalysis(n_components=None, solver='eigen')*
+*PrincipalComponentAnalysis(n_components=None, solver='svd', whitening=False)*

Principal Component Analysis Class

@@ -139,11 +139,16 @@ Principal Component Analysis Class
    The number of principal components for transformation.
    Keeps the original dimensions of the dataset if `None`.

-- `solver` : str (default: 'eigen')
+- `solver` : str (default: 'svd')

    Method for performing the matrix decomposition.
    {'eigen', 'svd'}

+- `whitening` : bool (default: False)
+
+    Performs whitening such that the covariance matrix of
+    the transformed data will be the identity matrix.
+
**Attributes**

- `w_` : array-like, shape=[n_features, n_components]

@@ -178,7 +183,7 @@ For usage examples, please see
-*fit(X)*
+*fit(X, y=None)*

Learn model from training data.

diff --git a/docs/_site/sources/api_subpackages/mlxtend.feature_selection.md b/docs/_site/sources/api_subpackages/mlxtend.feature_selection.md
index b0226db27..db8c5a897 100644
--- a/docs/_site/sources/api_subpackages/mlxtend.feature_selection.md
+++ b/docs/_site/sources/api_subpackages/mlxtend.feature_selection.md
@@ -1,4 +1,4 @@
-mlxtend version: 0.14.0dev
+mlxtend version: 0.15.0dev

## ColumnSelector

*ColumnSelector(cols=None, drop_axis=False)*

diff --git a/docs/_site/sources/api_subpackages/mlxtend.file_io.md b/docs/_site/sources/api_subpackages/mlxtend.file_io.md
index c2d44c167..3bcdabadc 100644
--- a/docs/_site/sources/api_subpackages/mlxtend.file_io.md
+++ b/docs/_site/sources/api_subpackages/mlxtend.file_io.md
@@ -1,4 +1,4 @@
-mlxtend version: 0.14.0dev
+mlxtend version: 0.15.0dev

## find_filegroups

*find_filegroups(paths, substring='', extensions=None, validity_check=True, ignore_invisible=True, rstrip='', ignore_substring=None)*

diff --git a/docs/_site/sources/api_subpackages/mlxtend.frequent_patterns.md b/docs/_site/sources/api_subpackages/mlxtend.frequent_patterns.md
index 772b9ff01..dbf710c95 100644
--- a/docs/_site/sources/api_subpackages/mlxtend.frequent_patterns.md
+++ b/docs/_site/sources/api_subpackages/mlxtend.frequent_patterns.md
@@ -1,4 +1,4 @@
-mlxtend version: 0.14.0dev
+mlxtend version: 0.15.0dev

## apriori

*apriori(df, min_support=0.5, use_colnames=False, max_len=None, n_jobs=1)*

diff --git a/docs/_site/sources/api_subpackages/mlxtend.image.md b/docs/_site/sources/api_subpackages/mlxtend.image.md
index f0694541e..911add6a4 100644
--- a/docs/_site/sources/api_subpackages/mlxtend.image.md
+++ b/docs/_site/sources/api_subpackages/mlxtend.image.md
@@ -1,4 +1,175 @@
-mlxtend version: 0.14.0dev
+mlxtend version: 0.15.0dev
+## EyepadAlign
+
+*EyepadAlign(verbose=0)*
+
+Class to align/transform face images to facial landmarks,
+based on eye alignment.
+
+1. A scaling factor is computed based on the distance between the
+left and right eye, such that the transformed face image will
+have the same eye distance as a reference face image.
+
+2. A transformation is performed based on the eyes' center point
+to align the face with the reference eye location.
+
+3. Finally, the transformed image is padded with zeros to match
+the desired final image size.
+
+**Parameters**
+
+- `verbose` : int (default=0)
+
+    Verbose level to display the progress bar and log messages.
+    Setting `verbose=1` will print a progress bar upon calling
+    `fit_directory`.
+
+**Attributes**
+
+- `target_landmarks_` : target landmarks to transform new face images to.
+
+    Depending on the chosen `fit` parameters, it can be either
+    (1) assigned to pre-fit shapes,
+    (2) computed from a single face image, or
+    (3) computed as the mean of face landmarks
+    from all face images in a file directory of face images.
+
+
+- `eye_distance_` : the distance between the left and right eyes
+
+    in the target landmarks.
+
+
+- `target_height_` : the height of the transformed output image.
+
+
+- `target_width_` : the width of the transformed output image.
+
+
+For more usage examples, please see
+[http://rasbt.github.io/mlxtend/user_guide/image/EyepadAlign/](http://rasbt.github.io/mlxtend/user_guide/image/EyepadAlign/)
+
+**Returns**
+
+- `self` : object
+
+
+### Methods
+
+
+*fit_directory(target_img_dir, target_height, target_width, file_extension='.jpg', pre_check=True)*
+
+Calculates the average landmarks for all face images
+in a directory, which will then be set as the target landmark set.
+
+**Arguments**
+
+- `target_img_dir` : str
+
+    Directory containing the images
+
+
+- `target_height` : int
+
+    Expected image height of the images in the directory
+
+
+- `target_width` : int
+
+    Expected image width of the images in the directory
+
+
+- `file_extension` : str (default='.jpg')
+
+    File extension of the image files.
+
+
+- `pre_check` : bool (default=True)
+
+    Checks that each image in the directory has the dimensions
+    specified via `target_height` and `target_width` before fitting,
+    to identify potential issues that should be fixed before
+    proceeding. Raises a warning for each image whose dimensions
+    differ from the specified, expected ones.
+
+**Returns**
+
+- `self` : object
+
+
+
+*fit_image(target_image)*
+
+Derives facial landmarks from a target image.
+
+**Arguments**
+
+- `target_image` : `uint8` numpy.array, shape=[width, height, channels]
+
+    NumPy array representation of the image data.
+
+**Returns**
+
+- `self` : object
+
+
+
+*fit_values(target_landmarks, target_width, target_height)*
+
+Used for determining the eye location from pre-defined
+landmark arrays, eliminating the need for re-computing
+the average landmarks on a target image or image directory.
+
+**Arguments**
+
+- `target_landmarks` : np.array, shape=(height, width)
+
+    NumPy array containing the locations of the facial landmarks
+    as determined by `mlxtend.image.extract_face_landmarks`
+
+
+- `target_height` : int
+
+    image height
+
+
+- `target_width` : int
+
+    image width
+
+**Returns**
+
+- `self` : object
+
+
+
+*transform(img)*
+
+Transforms a single face image (img) to the target landmarks
+based on the location of the eyes by
+scaling, translation, and cropping (if needed):
+
+(1) Scaling the image so that the distance of the two eyes
+in the given image (img) matches the distance of the
+two eyes in the target landmarks.
+
+(2) Translation is performed based on the middle point
+between the two eyes.
+
+**Arguments**
+
+- `img` : np.array, shape=(height, width, channels)
+
+    Input image to be transformed.
+
+**Returns**
+
+- `img_tr` : np.array
+
+    The transformed (aligned and padded) face image.
+
+
+
## extract_face_landmarks

*extract_face_landmarks(img, return_dtype=<class 'numpy.int32'>)*

@@ -27,6 +198,7 @@ the Python version of the library "dlib": http://dlib.net
- `landmarks` : numpy.ndarray, shape = [68, 2]

    A numpy array in which each row contains the x-y coordinates of a landmark/point.
+    Returns `None` if no face is detected by Dlib.

**Examples**

diff --git a/docs/_site/sources/api_subpackages/mlxtend.math.md b/docs/_site/sources/api_subpackages/mlxtend.math.md
index 9f196a0b6..46b7a1ec0 100644
--- a/docs/_site/sources/api_subpackages/mlxtend.math.md
+++ b/docs/_site/sources/api_subpackages/mlxtend.math.md
@@ -1,4 +1,4 @@
-mlxtend version: 0.14.0dev
+mlxtend version: 0.15.0dev

## factorial

*factorial(n)*

diff --git a/docs/_site/sources/api_subpackages/mlxtend.plotting.md b/docs/_site/sources/api_subpackages/mlxtend.plotting.md
index 80b286e93..3a2a387fe 100644
--- a/docs/_site/sources/api_subpackages/mlxtend.plotting.md
+++ b/docs/_site/sources/api_subpackages/mlxtend.plotting.md
@@ -1,4 +1,4 @@
-mlxtend version: 0.14.0dev
+mlxtend version: 0.15.0dev

## category_scatter

*category_scatter(x, y, label_col, data, markers='sxo^v', colors=('blue', 'green', 'red', 'purple', 'gray', 'cyan'), alpha=0.7, markersize=20.0, legend_loc='best')*

diff --git a/docs/_site/sources/api_subpackages/mlxtend.preprocessing.md b/docs/_site/sources/api_subpackages/mlxtend.preprocessing.md
index 4fad5b66d..5d91a4f21 100644
--- a/docs/_site/sources/api_subpackages/mlxtend.preprocessing.md
+++ b/docs/_site/sources/api_subpackages/mlxtend.preprocessing.md
@@ -1,4 +1,4 @@
-mlxtend version: 0.14.0dev
+mlxtend version: 0.15.0dev

## CopyTransformer

*CopyTransformer()*

diff --git a/docs/_site/sources/api_subpackages/mlxtend.regressor.md b/docs/_site/sources/api_subpackages/mlxtend.regressor.md
index 6bcfd08a5..021ad1dec 100644
--- a/docs/_site/sources/api_subpackages/mlxtend.regressor.md
+++ b/docs/_site/sources/api_subpackages/mlxtend.regressor.md
@@ -1,4 +1,4 @@
-mlxtend version: 0.14.0dev
+mlxtend version: 0.15.0dev

## LinearRegression

*LinearRegression(eta=0.01, epochs=50, minibatches=None, random_seed=None, print_progress=0)*

diff --git a/docs/_site/sources/api_subpackages/mlxtend.text.md b/docs/_site/sources/api_subpackages/mlxtend.text.md
index c21adb92a..90a2f0d18 100644
--- a/docs/_site/sources/api_subpackages/mlxtend.text.md
+++ b/docs/_site/sources/api_subpackages/mlxtend.text.md
@@ -1,4 +1,4 @@
-mlxtend version: 0.14.0dev
+mlxtend version: 0.15.0dev

## generalize_names

*generalize_names(name, output_sep=' ', firstname_output_letters=1)*

diff --git a/docs/_site/sources/api_subpackages/mlxtend.utils.md b/docs/_site/sources/api_subpackages/mlxtend.utils.md
index 0a89a608c..33b28e20f 100644
--- a/docs/_site/sources/api_subpackages/mlxtend.utils.md
+++ b/docs/_site/sources/api_subpackages/mlxtend.utils.md
@@ -1,4 +1,4 @@
-mlxtend version: 0.14.0dev
+mlxtend version: 0.15.0dev

## Counter

*Counter(stderr=False, start_newline=True, precision=0, name=None)*

diff --git a/docs/_site/sources/license.md b/docs/_site/sources/license.md
index 73f04a12d..bbeb522ef 100644
--- a/docs/_site/sources/license.md
+++ b/docs/_site/sources/license.md
@@ -10,7 +10,7 @@ according to the terms and conditions of the Creative Commons Attribution 4.0 In

New BSD License

-Copyright (c) 2014-2018, Sebastian Raschka. All rights reserved.
+Copyright (c) 2014-2019, Sebastian Raschka. All rights reserved.

Redistribution and use in source and binary forms, with or without

diff --git a/docs/_site/sources/user_guide/classifier/StackingClassifier.ipynb b/docs/_site/sources/user_guide/classifier/StackingClassifier.ipynb
index 0a15114c6..454a2c08b 100644
--- a/docs/_site/sources/user_guide/classifier/StackingClassifier.ipynb
+++ b/docs/_site/sources/user_guide/classifier/StackingClassifier.ipynb
@@ -63,6 +63,13 @@
    "![](./StackingClassifier_files/stacking_algorithm.png)"
   ]
  },
+ {
+  "cell_type": "markdown",
+  "metadata": {},
+  "source": [
+   "**Please note that this type of Stacking is prone to overfitting due to information leakage. The related [StackingCVClassifier](StackingCVClassifier.md) does not derive the predictions for the 2nd-level classifier from the same dataset that was used for training the level-1 classifiers, and it is recommended instead.**"
+  ]
+ },
 {
  "cell_type": "markdown",
  "metadata": {},
@@ -781,7 +788,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.6.6"
+   "version": "3.6.7"
  },
  "toc": {
   "nav_menu": {},

diff --git a/docs/_site/sources/user_guide/classifier/StackingClassifier.md b/docs/_site/sources/user_guide/classifier/StackingClassifier.md
index 4782e3ebc..9e28048c7 100755
--- a/docs/_site/sources/user_guide/classifier/StackingClassifier.md
+++ b/docs/_site/sources/user_guide/classifier/StackingClassifier.md
@@ -15,6 +15,8 @@ The algorithm can be summarized as follows (source: [1]):

![](./StackingClassifier_files/stacking_algorithm.png)

+**Please note that this type of Stacking is prone to overfitting due to information leakage. The related [StackingCVClassifier](StackingCVClassifier.md) does not derive the predictions for the 2nd-level classifier from the same dataset that was used for training the level-1 classifiers, and it is recommended instead.**
+

### References

@@ -95,7 +97,7 @@ for clf, lab, grd in zip([clf1, clf2, clf3, sclf],
```

-![png](StackingClassifier_files/StackingClassifier_13_0.png)
+![png](StackingClassifier_files/StackingClassifier_14_0.png)

## Example 2 - Using Probabilities as Meta-Features

diff --git a/docs/_site/sources/user_guide/classifier/StackingClassifier_files/StackingClassifier_14_0.png b/docs/_site/sources/user_guide/classifier/StackingClassifier_files/StackingClassifier_14_0.png
index e89c286aaad05d4e6cf061e4a84d6d972c627aab..da3f1de1d4de0e44b00cb22251098cd5b5f267ee 100644
GIT binary patch
[binary PNG data for the updated decision-region figure omitted]
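To underline the new warning added above, here is a minimal sketch of the recommended cross-validation variant; the choice of base classifiers and `cv=5` are illustrative assumptions, not part of the change:

```python
# Sketch of the recommended StackingCVClassifier, which derives the
# meta-features from out-of-fold predictions and therefore avoids the
# information leakage described in the note above. The base models and
# cv=5 are illustrative choices.
from mlxtend.classifier import StackingCVClassifier
from mlxtend.data import iris_data
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier

X, y = iris_data()

sclf = StackingCVClassifier(
    classifiers=[KNeighborsClassifier(), GaussianNB()],
    meta_classifier=LogisticRegression(),
    cv=5)  # level-2 training data comes from 5-fold out-of-fold predictions

sclf.fit(X, y)
print(sclf.predict(X[:5]))
```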
z#yt&tcWD`)Tf=ya&@+FoM`~g4S3|AqQa|-4i3SN+i383ETOXKJzm|7=FCVkQe>-g#4-KNG>pi z*Ag~sbAU!vBhJpwn<|dKj`^I9uw%cSIL>GN5PXknN__d!uw{9!US?$Z^7eR&|G9nb zK>i(A;AklB+-jL_S>U)#$=-3YuWrhNu~PQtm`F;#kPc5l&rnufu zbYDaO6ZrbI{;zs30$cC;#02SM4_x7zpc>GfpT=F6f~}})zj6*jRoc-@S8X!qPQ4eO5KK{nfXFo>oWgQT@EBrsrSdv>8i{tUh>~(pbs~9v7Zuy6`-&bsg zU;1+IH&7#3M!s>SGu5Y~yIa5z_)5m*<-F5wyrs$!@DlO^>fK=~WNjH&J|}JL2CcRN zXIy!?h#%wv7gzp>5#aCQc%Uir?|yW8!qL-bU4-2Ll|Xs!5)MG09{$s(8phFgurmjP zxc&{!Or#`1L-)sJwlMZGTX;bU^ArN{lOV4+s+j_=7DsFTrGlpkpONtA)1jJ4&w0y9 zBkK4e$42kAXuMt0V~6K?y?}lLD$)x8{f82YRqmk=Xp+WODZaHqHS|JH2V!u)PHSRV zJF`aUxqiaB!4CD)HAm$shDU#1oD$0iH$GnhN)HX`)0Dhkd*$`sjy(kk=kEjQRoBj+ zOuS$*R)Z@(Pp0Z3wajvEpI>v|wG!X`Q$(NShjcHZDbDlK1PRTYlNgiH8+$#wd|Cz| z@dHrW9JLlTvb<(QOo5?s29ZnQ9aPc9KJU0j2H?m~*$s|+!`bRBEj`To!!_Su&aQLB z-RR+Fj051x)os3u2sk5N5+jB+zSr|tghbTA_76K=&)0lEV0n!E^&-SCSxnTbK!mni zA0X{h1|Z``A)xvR4s#}FyEwa8nU|p2kmqLi`|Xm+1)Tg!;LrJUz^B$r53LgI2e|ih zRL9j;p2R+=R5#Rf04IhVlbxS8&lMIFm@njnk2dLLti>sEYF%ay6U()$tm6TYrU!@S zZA08&o2k(|`-)=2^$GDVlw4k5Hi`IrQ{M<*@K&n@ja-LeMwruSeVAqXW#Tb+BYqQ= z1Nd4Val)EQsyN;8S1BkffR^h3UaOtgdo%V5!03~0+#?4-R{yHS7RRqXiT!(dQz0Ic z!DhLz6pkSH;`-fz18Io=^(Ag#ZXG+m?zo%)(t1}G4zsFD&Cz?HRIbZ0qFAJkiZ3n{ zyUHZ?#JOACuVYo1PG)Dp>>(dtkGtCc{(|+!`mMz&{w>S>CVjX5%?Zo@(;GrOc@x=8 zrkZ?mJa~De#_|yBA^eV+B_$D`4?MT()&%7)z7AN|ang1Z0Xg3U_3lO7{%-v`E-$+w zWI3jxFU4-ovI%u!RacxdEXx}=76Q_=G)r_n+uVO5H>--R(qhVR%sKz;4J;q|1+B2w zG+R9xDrP3=VcEb-9lT_!QCpfgT3uTkn3-F#l)7SE%C!kKCtGzf$NP^G63vvXu~$m# zPwO-i6WoExLgWZ8L)Ic*zjv%zAe)-*&@tVX^oEI-*S>>$>nU0S^?z(Y;o`CPOOims zOXD44Z0HbRy+G0uX3oZXmdj30g}9awfCBqT^&oH+5O;qJtjGJ7O9 zFm|58UI`IR4>$`%-iO?w#NnOUkD}5DJ0rrZ8+%Df$=O_4iAnF&R3%aBS#-siDs4Au zPu};-7jGQ?lBqp@S65$0fc9f|mtTurLQo6KdZEXXBZYE5lUGFh(WF^|K@_Ekdn3zM zsd;r8pR=+!lw%mv5*$j&;37O@3^r) zX~#JEN{vs;T_*11Lvv9e5?#kfZUV2=?P=u0{lLgaiWV{-_JOjjAxuc{xyyo5G23oZFTXc19XmYlW=cFMOz_oTm!PIZVsQ)70ek^)I;8{pVO)Ch;fK>-+OB zR;+I_5(DpX{U?xdj@#4(sq4_mzVmAWSC|6KB(Z)edp(wIFWf^CEc<@ma+q-_>Rh({ z#t>TrV7iw7gX5lEZ~)GU%Sa=HD6iwyD8_&T&y1Vw}){(KV6F11zV+Vo;d zyh^XujB7+5l=fA;G{;lP;cdtA-|zYy!T@K{+o-*NsJ8ZLRTO10<8ON^=P4a015ca? 
z+r{o8OhA6iqVw14o|4O!sd9v+EWZ0HOKInPU;Ak~Fg5N3_X-;nx2H)HMQH0|ZX*ssk+}XuMKD#ZiT~vuk$|VAJ&Z*t=v=5OdRx$l?VYEWxku%sLJERST!$)FKp^P zK^)d2?6@Zet4g8|>O8qKq~SZL<=-U^79&pL^T{kAY@fz@jj`+=A3^qbLZx<7zxvn8 znCJ$&A!T@2_aOb3c;fkv?h3o8{K)b$Kpc@Xb}3@ITfM=?5fINZGZKovjpv{g8*s*z z#w*4dr^B4|IkuiZ6!#$E{+?1wua)e*-cta7OcM_&_$`XY^fxRv)GP`mYCit?`<#!u z)O1?#HLqwcu@Q)8B)`~3&W8} zEnEMW>R6;!ahJ2~=V9U8C_VkA?cEnhWbZ9abJiK_Aa^>HsUkfhj z0(iPud;h;1RsGYM4BS4BBo?1pN#z*aHW*UphVT_+RSxL8zZwT^?!wL*5(WgURc5J~ zwj3udlG`0ps<+4OoZf+Ne3Tg<8ENh1H_fa0o#YkT--?Zd)2E0q;-YCiv=_qRKQR#8`!siw82PcMRPpC|$=9XD_ z48VoH!XnJGAcmB|X}o!4+Wte{hb|(3ElI@dhIKlGn@E#NQ@{}MDMXWs)U@eu`i}y5 z7Wv!d_$P_=1+x@KlNkCyjS~EoGOprp`AZgIk{~#!12#rRDY8~ms@zF7&E@2?C ze*ohlZdYi2sFOWcdN5iDV}T@gtJ#^*7qOa3c;_TO?aC{eNNS~c-S@A3fxXw?jXp`x zcXBDy^~{=_q=l^a5$X|Z&=OMbk7X(@gi6TB%kh>#Gx;wHf>NJIoBZyvTCvXfZq5>2 z_Vtm8QBsv96Zx$7&5>^!7U~mszX_Pe{f*(V*;_SZU&hLxNCvi&Za`3e8?f%ijpW~v zPH7j9(B5Yy@0eg*^{;!-PyDi96VZ}5`czW|FS&G`DbC{h^M96P~3bV*h?{?dVJ0#q%mRr8+v;_#l zW)Vr#VvZ=)i<@=Ti)d3li`)4Z*fnt zEWapv`>%cpbf#%bhP4@QU7Vf$Mhp-8f4z~p{Po+p;E8=YncjQxQg4=&buk))@pWA8 z?xvP4xLw>mr=&7k@3UMmaJ-5!|?LvrKIYw{~{5bmIe7pZYe6u)tae6!ELU{MCbL9>JG#i@Qts-zVc6zT`B!Wcxp|1np)({43k(xET6PiO1@wpThBXHINkd?ewwy>3Pcm9XIa z7pzbn<^m?n#KEM^rSgY<@#i|uq1z7y!p&+`5TA_iknuWQJv>DQOr#ruDDR__Of5O1>#A z>m!FP_XOvw46qLH{3VaeMl4-)f4x_<%nPG;rZ#eSagH}+H>z%--0SDs;8i{dF zfDq#d^J*@@K78E6A!6UdtrlF~BjQck@O#TKxEo%|Eh#lE4Ky27#PW6cY{}z+RSk!z zW5|efwsQZ|wN+O~+Yd}Tg^i+mwj9XjxTxb>Y0s2H@{6-5^;ZjkfHq?V81TwJT_qt# zLpP}+iXsYFOc>9o{@YT|(APV!>3)@ZvVy?;jC$X7vVQ}jA3MNVV7gXY{(Z}HF=xzMp=UHx8AX4KQv36wot$#2s=j%t^p7}~ z`Ftr(hx^#}?RiMb!}crOe?uM@+26rQANa!DC27XE*wi*7rq1(^-}IGa+Nsmix$!5R zws+gt*pgQA+n!X5j*jIt9-n))$``JeWY=US0nmj+sGx-Nd7!<+WY40utk!qB_h7gn zuOm_tm|mY7t%qgi} zqr9lDh`@L{Ts0bQY>)U|{k!7t(d&(ws@pdjwxfTfn<>H6rI%kXzpQV2Q23*yl};i# z!wx*+kaC=uIKbCQCjo56lhy}v!kUY#B-9g&&MUee4>`L5@#;+X7D(Gn`@Lb`-JL?` zE61L@oXfPp6$b{S;j3Y2-}~|V6(3Mz&#jHDkzl>$q!%xGhuvawfXon04cvspSmeX@ zTkOm;g$8U3`Orn?owxc-C!>9+(WIkO;S6iQUb?hYv#ABLEi|R0t3^eAS#ng%`PyBq z$2UoMJxzqpt;dd*7A)K4`$Z3*y@2oKssX!xwy%S9ihn&EDK1^Rb-oVW7>My2k?=d1 zwE>7l%B)-NQ`BI3C~A(U^;LXKIQ3aT$4N8w#`O8~mLnte>2Jikfpc3p3hC_xsq%TO zM4ed%t4S{&SKU{;Rl<{3SeWiGEhgf3`sxkdbJF9M4T5ucS=OriP7&IS>ds4qzVHf& z$#1GTs9JV%j!SOq(1qyCpL**SjrD2H_MOjsmVj5ID_6!c_mF*gw}1>$9U{<4UO=!c*(sd}XR{0GlM>mHd#& zr3O3|PF3zX2aglElMIm3OJT)0s|D0XSt0(3iA>)_HfMYb<;(^8*FEvWChGH!64>JP zlZnXX^dx60ct%G3r`fAR)(@{+ucX3j997nTltf96Q*c#~yfw3F_mHOco%=4cy9D|_ zjHtV-+h}&R{w1aeipavhrCD~yG-(d92?_Ks%d^AA|GWVs;fyX!ogd293lSWKYN!NK z-7`gkf%U)*9ysdG*1{apK~%zDxV`>|b2e;sYvX&t?`zX0pLVN<*O*bLC#j)M>W)$? 
zhm+%x)v-GOX5XCk@%2Mu{BL-_2ePug_K{j_UY_@x65l8r>{hR-iWV{c-2LZY@tC2} zSK6BWdr8&eHTNq#H+MHCYr_lY(tDB?JLXd@`dk*TNlZe=$j+g#M<*;xCYm?I6E>QU z$aNVWf_ojJYn~EYktd8amPU6-Gx1X1*_|KoNjmX^u%|ep6f?!29t8saw~a-wgAw5L zycjJ#X|!RwaM_h-@bI~s4c4&i?Ebc9`PgH5CqiSl8K9ifD(LaXy(72-qF9T z+EeWU&cU(2il@fA+}q%aSnkEm!>zY~in<{cA@Ka4_U#SR_rD#cl>tfJCk7H(W$Cit zyp0B`9GDkvb119)&iD;tvuz#e_CC(x@(X^36?wfjpuIAFBw-!`$+}L6O1eJATu@Z> zeS6be?0mxk*sV!siAgpezZ-TJL(~Sc&zzDrdJpRtHkGsS3mYe97uQ4qNSUA{TjSHQ zb{W?`i;Wr=`o=iTD|I9Z2G=8<2!1Qi^WifMF!9%~Uz6s+NAPR_A$!8Yi}}L!PBHEExv*7CV#z?kRzXx zwePtnDiX4bv5RfFmHcs1a>5>5MtVY3iTo368 zBSy%AgwOKTD^uR6bG+vPQ@!}eK}ts@y{Y<9lh z%{>7(0ny;O?efA*L;%`ctRF31W&d}2E5biG!*_SwsEOjXHyrxM_(l=m>g;UKXO%fj zVs%>gvU>XW;^K&kCg`ISKfG9f%A94-(wW3bC?px|kDhOGQ^%og$gd#TXe+MF%uQ9>RtlhjSao_X6QlWiv=z8*YFWHCv zc95U*Tl*{ksOkEC7ExciFK2wm#=y$l_&jdyyU*5#sW-T+wSy83rV9BAYj(*c7|p5- zP*Lt(C2s$|$qh^SLD3;?usLteegQ2#f#1;Mhx=@~cd##uQug#@+WQ!4D3FLvIIS#y z{lRciYZK*#t8dCa?2$eqYzkG>`hnu}v=?&o%@)!;>KXNRHPMDJ)(gLtIfV5*jLcqL zdzr3V7AQtfSiL=xeRd1h9A#l3wK8FUdu}eaNeLbikds(6#(m~On4UY|{`0@} z_SR8VMP0loDJlrkNa`UEAcsRYN_Pv=$e|PtjdUv=5&{Z{w1hN=4oL|?KD$|Wu=3gd`aH1%$?isl+-Q%+vZCM>4FLhYptv_Fn zqjvo1fQfpc$(>3t>@{|g!#)41>)xBMasm##8oisj&4F%HtD1gv7^2Nz7HSwW#hjK^ zXKim?y@2_HJBmcO4MY9lHQ1o7T z&cjiXuP#p9{_=mdwz=OQ7kMOzPq&FVO+N_v@k*^OgG{_3VthpQB0=wR!jpwsr|sJ# zvCBh^x{QQ-n#}eyzkD-Cd>IfYdP+k%z78|L6a_a|ThBc;&#m-iV>WYax@q!g6_Vxn zq^Y9FgzB^}mfwz3jq-VT8(Ml=sm?H!#V}qdzu?>~^Lz3-bFfqnnzTnFVW}hY&TRu|6iZGYHc_;#!T6=;M5I zu@KVeM7^nen-Z4D+oWLKDU|vKX_?~j2wAyRfl8?K8S04+pxOBTLO(o=sF^D1nWBUr z7RHlBM_bREYXNxy`M#^#e!;X6Jzb7IG0Pq1Gcq*=W$WR9}-v`!1$XHGxz;Ju3?d_7$-~Fqz-TE6`AxCZgbmAFboB@vpw`X>{D5_s?;Q4`yp+&*nu(q92(P5_YQDvOXM3 zQK&25mwrn#E+M_-EK&pum?HS1V6*YWNc0j{Tcwt9c?F>?i|ZjdRcU|o_zfkhlL|Sf*JO4t*6#6sd z))9zlE^NUAf^Dw2ZI;3urd&>o!HV~lBwOAU(3tiLr3_y zwo@Dq>&IIC=_q5Py@lvJ~)bk86O=6lECs-`cS+vF4R?1a5`8Lz8f5hwN;QXNo=c?jvXM+kXL zZ{fR)2(kS!>{-~iH=BupvkgWQsHFQ0nl0nAQ#HEt)v5_D{n7mPJ%6eB$6dBzo%dol zr}W0m50j!NL?cZ~aH~9pd~PlYYyOt8^C8b^Q&Sn@ouu!7Z2TKraeQo8QMr{e`o>mR*w#Jucn1akO<%K0P4(PM_E9-o!Is#(#Dmg)gU%nr zfu}Zfk=B%XA`L{IXsoKmS$knJ0UUJ9m0$2vs@;z#m}QrP()(4L-Uz)Y&)+>c&-?tM z{m?fls{dGhdrCoe!pG)>44r91BuPJ5MnAvcCMF?)?Cs{MVcU$kvn>gf=e4K+ zEIY5%0l#Yc{H~GRO%jq;l{7NaX?~l2*tw$Va!EOWL!6g;V|RjdbQmi_P~k|4PP9mp zUXMkVa$n5(TjbZC{yfEHO}@s6XU)MAhR;iwenuS?bGM46P1s#OG_n)_^{e<`F)n(4 z#MhhmOZjBHC+n!Mnb3uCsoSN#D%WpU+G+o6w|t{efzKIhCJ;y;=T*d-*IAwQ-qA&e zjrFu9Avb?&5}!fq*2tx!lI=L?buf{t;K$GNEr*G|60r%OCvLFtmfsPWU#E#lzGJj^ z5J-24OKh#W7zB2H1z-toN#@QKIT7z-+v`-{OQAODyxII(pOI30!$NY?my=rPyDHC7 zcX*KE&@n$ZDZ~@^W5e)WzbiV3P%lfZJmAa;y#rVeUvngekCkh_%!UTY4Gl1r-zE#Hk}&z)ANuUX6uTLX1KH6iu4SMeyvMJ#U}#znRx4u zhAoD_8`0JNy!e{8raL|jJZKHWXErdzenjbPn_Qh#ae1n<%%xPl9Qq-3|KqSdix|k4 zd&bqG4LN<^{y{{&SoAgiJ5@T`)MPMt7UWu=-|+~bcE6ygJje|&3`Hgm91RP<#6&m^ zl?L)&&gv)c>I6Bq#z;QE@=mujVuGbc)J-ZI(@~FmBz+zEb=?}5m&Ws`2f25_d_&hT zMeMfm{a2xpx9Mj+&7pE0{v9TYdfX;DgQ6ZQ+w1WVOKlz+r7%E??vN6v`X#7mCH=q` zAAyapE{K3&`iiy98Pi(S+d-Jqs1>m2gX`&N8|9*Xma~^591i(TB3}RSnn!Q11SAty z2LhM9$~PDo)1lP{q*zxEu+GU#wi1(ToVWF+3$8K6JvSdrdsSkp{vsdSQ&=SHfXpks zao2nKR=bhHVJgP#gpgxwr#W`KX%45+h59_Lb=1E#ux5=j*msPSm!D?xh_i7dfc>RC zVMOpuTBT2K*glSrhu^_+(Q?3^FAiq9#A0)i?Rv))C7k~OUT^Eh_s*sK<{j}+e}8xS zuo$Zv<*eTz5@06?iuyj>!R_R2+0DTBr2FKk{+8{N**D*^8@$QE05QdPC#W>>+43hY zIQ}Ajv(Wl9DVXCpW?Tec)M??s<<&&7i4sl1-^hN;fnjY`&kGI3QUee$~u=;px~c+l_fh>v)-4646RYI zMGcJ#1ph4%mqi|t7+_XC6!4-d@=wnD{sqv4q-B;m^zEi*nCx<0`rO6h$gDYQ!+a-M zaIEs(*w5QpNOObS{;HBd%}|u~E}e7@EybGyigPc-0=0Hg{pb9AM<$g^F3p_D&bH8v zZ$)p*9hd&nM~NSv-K)H5`(ZeBYJ|(>YBIJ<@Gkb%X*AehHlj8V+qgmiflV@$rnaTJ>3E-yB%!l7Z(;dU5i;UkPvk+-nWP6cd;~Efo_t+ivDkc_S)n 
zF_6(BEv2B&=QIT5WBploGQ19!qw$SI(H6tyhG=ETxfqi89iPtIyVLD56jr`9WqFqZ zW;@Xu;7&I;q%$q*CCl=HKDffT0~JW28PsoSWnGY)x3)I7m2f2dmEc>)N&z+Z;?#&M zJ|^ahy6#ypTyojm{P$FY>1j1M4|nWd_jJ$xPR9iy)v{`9{rC6x5GdJmFVMa@%E>qp zqBjFMWcBCcqYQbkO(7bEWgnA0qL21R`Dqb7%TKWi(QFfw4>$y(HyRyjco(cY>V3P; zLT6Z5mnRwKrC2q>Uh)ZhiCniOx+7+oa(rLiadA@3@U+xfeSX9|8?-^IU_B!#IWB%N zcIDPUyn9v)g%xL(?X$4y82xH_&-Sv$k>K_o#gO03PG(i6Mlleac?)Wxoh zzkZ#RAv$*iPA0<?p5TI(c*%fPb%h|yXh#9}+od&`66(V=BY!at9 zIk7xBsOi}HE{K1Wh};otJ%sg|48GSym$<2XHQ}0;UhZT!UNFhe+u&#Zi-BVEYoQIz zk6BEtKK>k!uXN-59oen#H#C$7!G7-PvGjlO7E@O4 z9~>C?_3mEP;DCcTiiY~y0P-|xNbn{!s`{H1VT?rFdZoi^hjPWnH0UWdWA*Pf?F2q{ zDM}WM>2Q3@Q#spnSGd%>A8*z)to8D{v=TfSEKlEzx6D zjS77s+jd)?-l#a)l?WX;Tc)Ebj%TJD!-t2-yc+pS3I&^q*sb}*X0$1s4+e3!zODMPkgg0*iGPr&B(aJafWHkoIbn?yn9B(aipwEF(Tb zE)RA!qq9Am`s4yK6E2AIqxo9i;MB(Jcz6)7$HE`F2m<*+K3WAwBPYHr6xDH90+dKJ z?Rsh&I#)Zh)dHp)@(Kl)8z7%vR<2X#=Q9ocE=hoD=G>n0^W_WmTpr+(7lqV$&$SSs z85tBM`&CvO7-QA5>mB|eEWnoyhfU!_Yt~D+YOT~|;e-xl&Q6+z64~jc=v8bLFHNhr ztLpA~Ol=DP;jEAL*SYLghI$z>Bx+@mq)Q2VMSMy8yqX%H-cF?NTN@mecrv%Op1ZK* zT_IK)jC~-+CGwZ;?-gbsjZ$B3js*uPsn44?%deEa1uE2U0U){Glk8_cdW_@GU9v@h zu6VXU$KP>v6}R;vcKxiMjPxmFUg}e~@*I8F9FuBVuRD24=QQ2U(rh@6;Gj_Rv?c!F zeqcjuIAb5p%bdyh+)0L5cF{PEck?cHAFc1k^ws|X#j5f?d35|)v>HjUlU(KK_MBDD zd)k_dMy0k+=i%MMg||8n?0m1!ZgO&Saon&c8u7lhqoNTJ>AAUSIXSt4JN1g09f_-F ziYJ#V3J`*&jWII9E|kq;&Om>j!O6!BfoihPJ2Mi$H?s#RJ7e7w+4vb9uBB62Mp3DM zF>NO0-V!`UyiAZ*?AcHk_UipyN#O@v3IbAvnc)uelGI{e^iIpvrc<>?@-Anqo0C21 zl1QZ#0c2b2eu8;+{f*mRe^P0Ps3&Uv==fkzal`$5jrnEiyg~wpVPL0b`|I4Nv+d+* zCapm@YpY3sStauDSgb8ee#`4}!FVwvyr^Z#Q96Mw4fF(vD;eV^04+3a6`uGoFEc?^3bbR?j0WWt2pww-9E81ictwTXa+mQ< ztc=zxv?mQKS15TNdSB-%u`hjY30P!wIG=-~eP^3Z&o2nn zU+~7{=f}kvebkXSGzbqdOLmNNjFS@pJt?3}6vjJnMeysvbfY)_!Aifvx_grG<-ESg z^=aVfu`jwM+OKK@{{1!8nKtvpDH+hrhet>DI}JC@dkZdqhG`cRjnD7>{ktI3OV!u0 zV;m;ae-DTBsY<$~21L`=k#>n`i3$3^gGoB0`?&s()QLC`e|7CS-Rsxx0_!QW18k!N zQ+Z`u2Y#W?1UU(NgzQeE{%($~9E1a(sK!RFh?f*sN8{pr4(LhJ9;{QnI* z+!yn!2FNNX_5N1X)&|@owncyz=X%wG)Y8$xD_*aA)#^PKnRIjMI`Lw?u{;jwvz+oXl&Da-vX^DJb`kI`*Crog$NLc%fwAd+0Es- z(ic0(4d^xWW^l3ux%=k7w(9NSpUFw>&&D#$?eX^GXTncWGl9~I>*^{v(3#gB@;8|7Mp1( zG;rqy(mQBJ>to_f@AVZR`SK1+NT}Q#-4pH;JuwATOEKSQ)Kr9#=ocqdyAw0lDPds~ z_H~v-cQ-p$E?hZk7B$%os!e-W+5}BWpR&GWxU-avkujMy$v~t!cpInrzR44Nqvh5Z zB}!4dWAGic1C5;1$2Qwdd284aXOAl@>2SVg%v0CaUO&?u3O8`0~cpSZ~&i^8_}W~JWA87kwRSMu-k86bJSgdIt* zn_d0bkQ2w2%nV)eRtFZuQ(soEC6ScGGdH16_O_f|i)VM|vcU-e0bwNaX)CUt?l9T; z1*~)FoEj4|5uvMZWo@0_;H}eJEwkghYH4+|CkE>yDL^EBv4k3SE54WgH{J=mqO#cU zQ2Oylz$I}@bVy0$fP7`vQJuc)PnQ^R zG;E1t%#v8>=N^?iWX4Jt>HcSOU1{k1&rB&Z1;W|S&(+kOOgdptRP9(?U+Na>SNWVC z-CWJcLQt>k=fje#7i0T7r*4XUj&?2=nF;0{uTsj&PKfbrJIb!3RDI8xIO;Z0NqYqC z%&}3aA7N4M-zmZq%!v~|o$j!Tb}QEaqya(uH%WH4$%P3hK@N*h+`rHs}X(=hYy_j;7oW7OS#kEv-BkE^Q4iDe_HSEGwzDMjJw!{=Oc>Bb7 zh^I&4+TFPn3oMh0T zCh}&IzWHn(acg`-p=J*n{LO!%Cz$A#@13Cr$HzCTUpUBCrIU0eSQSi#%N+wT7*9qD z<&wEoHow;f<>z+qEeG+4i`yXh>At376ac z@54mu-z2-w{`~39&S(KEnIaK_0Fjj<^$*(rr%ll6$X$fA(NC@^2I{SG|9{#xz? 
zd+;E4dDCs}t=yxh`g;VG>D2tTPoF+LiMEo2##(#K8Yx6NYMOz0;Zr)KeGc+HKN$&R z@8J3eB94E_@qo~8yk^Cxn--Z2(jZV#Xng&E4EnZPIR=!=3`E_>p%OJqDF`=!;My`m zlK0Z8HC=GfDl12Z1|7d|zYL|nP0rMmyiI3Xo_iNWCDT%;tt&p+RMVD7*8S-3=C*HF?B+Hd zDKsj6jW4{k4Ww4Ui8ttgX+-eULR7%45^E_>Mq7|T-K+7q1TK=mPRiba+A7?1``Hm` zc7i7Tvy5F&&nQ~ROau(Yh{^9|?BTblcWc6x+xMD%_`ANiY11BRMc5}sqpaL<(bOfB zjJ=16om=;A?ktp_Q5C>Ad0mQMKP~2B!Xk@R#P))*!au$!kYj4lsSlokPgjK4|^; zZ}P!EhZRC!LrA{LWwP~%#di8h#eSV+5KPZt$dwT*=XzOEs8Fb;?pjPlQry}F=9tlP z%YxhV{!(?{;whJyR@7}{L4tXa0&VlgxGY&8L^wr#oB$ zGpqV2T<(65xvtIlp{S6sx+N*01J)9gx-Bb)N0gLYIv}}Ekz8#tzITY0R)V~kQo>>C zOI*)y0{-qflVI;#^8F7vNO`Qhh^%^CcC>ZBCTv>%4)*W_JR`SPqOgVvpWY3{#G+Sc z`@B$Uqm*X_*I^;8Ko@q@OjlEf+>@)7D@6*UZPH#YXd`-wOrECqu_D-wi*SA5gCLK2 zhHk*3q8ycJDbMiW)63ogY&WhFI_SeaZR}whHbMuqr5vKn$wf4e1Su|-F&B;PJJ;l+ zxDl8|SP7VHrrs)HiuEu2E7yxdZo8GAqo&I1cT$dz$715;o1^_T{cKHyOIlD{E8H|m z0y;p#{ljMk*L-|^U6NW5jTQiDYoNp#By=&~9>>@yzDn)k-39zylHjQOU`Wx*rjC8xA+dlvF$A8%vA?U@(!&Qe))u_4*sT+4%(0mrh6w zC1M3@3n?5j@Qji)=0(XfCq?jUu%Pm z2CPEm^uX=-SRQ>{)`75)w)A-^26}LXBpeI(A)_*34BjAkMIoTEnrEO~dr0UY7MT!Y zmKsJ9V<4Ats@6hG=s*FkET(`fZiCcQXpG!30SW%r&sq}LyQWGRq%_T?spX}^;uA=& zEhR(cv+fGoJ3B2D#A(7pv?du;G9eJ`=h9&U`efTCtdBxNh>J1vI^wk*;@ES^B`HY> zp_IVB?^-bmVsUjW?Nipkr%YMmq*VY39k*1Nqy~V@7Bsu}OlW@-)Z=+|p|%WlLzKCm zCLZL|tIa=*u~D<_PIWF?_(Z#>u0ja4y@jCIv{)!SpJUqD*NEOvDCCKyP<3qll7s}g z3n5gZTN(S=nXFW7j8=^4#Buf<;5zXlTSX(o^bI*p7nx~*@zjQdhcGZ_16%=#O4#Ox z5~ESAh8^FzQmq1w`*QO3df&3bQD6rJrWej>AacUal7j(smOo%jgpdR~%V^Nv>BA69 zN`EO8)-m0ua$Y|TFV^uCNg4mG#sv>q^atXRY@O~?;b=Ng39*0Du`rkDI`#1=yp-7VVHYo$qE`1) z$@jgzpd#|*&_H<*z|#@Dsl)T;^HPt?kHpx)m7{=>o{=eAi}*x>rpC^ll>#HXwmP|X ziaU-_7K;H@DfpUg(&r zifY4#w{2}B><8%VYXlrG-G&h&;CC;@^lRi2mH1g`ZDB|V$-UearX@!C1kgtr+BekZ zp)kw!)4fYg-A6-{%-5ln*cc)tcpAp~$ww3$*4BuQStv{=5d=_x%pXo>)iZQy#6V&* zUeYMAc6Yzje!**cQ71+heSEUQ-Y8|NcAId1e(_&IFifPO)Ahs7DCUg*ofR4i_|99^ z$IoVF)12Tt!{m5n!kedPbyq%KKTKqfZ<(Q?NB`?N=IQ!L9J|_3nAq;K?ujOs)(7zD z8x?D|izwP9e3@%ZT(k>6%g>)r_w0P){<>Ff85(YGY+KN%+4k;k_)J*P{T`S~9li$J zl?8#d6jTy`5m~Ek$%&qer+XDBf>WWz6&Lo!e(V?05^p;=0 zY&P=X1ww$H{SnUupEZ(d$E0w;2M5TeLXCdqZpB!3Qm96S=`ZW8^&QLHp7y=6vN`38 zfakR>wNX+RUgpx+ZX#ZPH)w=ahZoXw%7|dQWcC6!L+H)8TGdmaK{fq`T2KVfG?0&d zk8;JO>koMHilS;iQ`$5mCgOv)h7c2@&8O?@NllUvO`I~(r_a8#NM})yI@z)w*T)wi zO7@b%xhEGdg=*{a?$AEoFY&f?yCZqqB z`a$b8^|XE)Io`gh?@b5?1Jb|Q9w$~xsIV8KFSN{e0` zvKck9Dy((k=kTg{i>kcpuj$wUZC| zY*L&^o!Y?ccYhK-c7BCF)B5|p5CvY@&M16a!QsW^Yt3AY^tprG3l+)4VoXCr>MCTT z=|pvF6d_s^=2IN}wkw*O<@s>>%eTz&i67x6c&_7oORtJAkjdx>38%#JZw&?EHwRNY zZ{!T56l6+Qaz+4$P$J!RiOB(xRjqb9Ao0Rxp0+RGfB5xIb@XqV%qLHqzdu`=rAgy@ zdAf=r(_NxJF z8W_e5hr6crGXvx+QNP}Orpy`s>k!@9+FR1Ub9_-VwQ<{%Ont-*F~!n?+N86xNIY#@ zP`E-c{~T-5+tsstVCQP1vQ5SAjfO8yVpPjqxyx}T&3Oiceamwbixx=I7+@in8m%1%5pyp7D444jG~$(qU7G8O)em zyqwfpb-xF2*88w{BCXBVR9R2YQh$6C)zSB5fokN$RXx(MbeuDp4eWX`IvUyNGJ?BJ z@3yEWJ14$>hewMSmBkv(T6DZsTf@fi5%-zBAQaoc(GwO%Y}3tVIV`M(?+YKI=l&V# zv{`anJaxF9+pg2R#@F%`fuE#pmw4A-G1u0Xg^&OPfaOXKX5i33hJ%Mu{;}I2EmYmM z8<1i%ej5!IzWVX_P7)UVHs8_1T29z!!8k>7n1$~<+->O5(P(pr<(F6YIfXZEb()X} zziEH}q)uZVYcg@1>*${L*RN=lu1$3>lg%B*5a9#e1UJ4SrU*Y-Ai~ z-n6|Z-ydiX&zz1?I1#pgBPrRd9kb>WJipcS;YLedMR)|Qoxjh-xbC0i4?wpV?B zx-2&T-x3!7uDflZDV#r1=La7)#Lox5U0h9Ts-lXro<-S=Y_r3 zv}MHu@^lttg8WvHNl+aaW4w!&{$wStBQm_kSJhqcsyUUj4D;S^f<5G>}{NB6BCELC0E}x@#d%mYkRG2tV;5j4@z6p5#f-VcHova!%6) zu$=uiBL=R&tCq=p4m!GHN%$%h}cy*7)ewg!{v;@sMIg~~m)BKdeePy@_;${IQT<{4$smHDT3gF{x zEYb^m%gWBcnXTpHAHlmaas^-48 zEzt1rmvRRC!2HAY`F3pFZ%suZ5s~Yn1hZkg<5uAoeP-+L?p^A;3v#yvEuLUuc(w0X z4)lI{-Trp2-rXh)zDT>FqLX7*C!81}R|XVkw&UCBHk0v|9XbKy29N!Y3OYwQ@?zu@ 
zzEW8M1;_Egp?iZCsO*L^$ITD+k3al=UsPM4!d;LrGx((FoB~2BM9^n)6>s&&2ZoGp$0PYDTdUa-6X2GL>#zKejbL zDF$fmY&e6AyF3Fw056Ho>g2aUzzJo+Wbp5Zy4A974{AUX`HzD{AnczSMo8#Da;r6g zW1i(!=>kWa`mL^Ydt}}!SpWaODE=REkL*Yi?MU)L^a7sIGtk9=g7*m6vmovCYqw0C zC(XWSt{CBd$Y#4pI59VMGC;!;06$BGDYDW-tI%dZ&vckT&0yQ2o%>mHQGNpV=l|W8 zyH$L0s9)lV@E?8#z9YNr_OC1?K@6)b&`Up;NcX=PGGrq)@l@Gp}{5NImFt6u0G- z?s%88#ROWH53_g_)$n$=|IR3`h{?op6}=Zj4JIfS(n!4ATlSf3DYV6Ws6LD@QPG7Y8UbgIB zcK;e&NK{Puf4=$u(%}?_N95rhg{8=e0oCRI$D9`ydVZKj{p*;Jdqd8!e+B=4*H`5W zfK`T_x8ri0qP(91@d%;Ky!$B}R3{bUP6WIz0-h$%9Hi7?8KEn7+PItc6>g!CFt2YJ z;r^Gxsh7g(H^l)PqBjLgEHTb0Nx`c|B)B7qDgr(c0iOyKhfyKKC|9^^F0UC$jf3X5l;G2aPGIty-qNd#<~EcRM|V`z-h0UMm1Y)%YHiKbt(wQPX0<#o5+T&xlgQ zNPRKKSytoC?dD|v>V}9&crIqL--;i0^HuXnB%Ds3J1U}$|K}YGOwBvKDy7Iedf~ZJ z61RtL;=4M}1a~XoIjrM2j$Zh2QFdxYeXrPd8E3tlO=sOr0^!TOa&~>KC$PNJJ4^c) z{NQW;tv2>us4!Qpa9)nyYZqWiT-j4LaU3lbBiV{P69E*uHBeyeUjty^A<*=2Rv1}= zCe0?^%J`t+zNA?ZJ;9ieZ9vhUD(B40aW^hKdq|PC<(5J1J8zOg=eR1U#L(!K*4o2$MuVt}cr{^YWO<S77$pk3~Y8oL;xXd7q##R zDZcA&f;)>|gy~}!p=&zvR+EpwCISxh81YdQjrFvNQQs?~y`@7?n!YE_ufAh>_^FPf zu59D0RxT$Q9~t*G`382q2FsEGbX}c-N`U?KOXsg<%X?GG!q}&jyBY9h>Isx5ef`YZ zNV5hQTjWbQhYMhff~eT-A3aBc>|vAf8<6$^n~7=c&HJ9O%i$Je_P8Ima6XgK=hVTkWUyW3N#v|2_t%WB)YDOZNM6($IYh?3K2CT(J15% z-)6KOCBg{%8pDg|bVrF+Q6bS$QweNJP`6cx89Er8-3x>8FL~6Cpi2`9XOYmQxOfhBYL_x$xcR>f2@3sfc=o^Fm>EDe* zRq!wJ@I3pc&V+)Y9f{>m0&bhx&99pU1AWubXIt^lt{JI}RhoR&O=$qfvdY@W{!z1r!T z5L9ApH25Mpd(pi8!x?emXto5p@a*1e_k3v%E9@W?&=*9J(`LG6n(f{6@VF@^1N zn*_z(mWRP}tN&<5u?(ag|Il42p18vK@zl}F5LgerdZYT3_vU+Ao8~fpDqC;@HnH17 v_UBRH!GA `from mlxtend.evaluate import bias_variance_decomp` " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Overview" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Often, researchers use the terms *bias* and *variance* or \"bias-variance tradeoff\" to describe the performance of a model -- i.e., you may stumble upon talks, books, or articles where people say that a model has a high variance or high bias. So, what does that mean? In general, we might say that \"high variance\" is proportional to overfitting, and \"high bias\" is proportional to underfitting.\n", + "\n", + "Anyways, why are we attempting to do this bias-variance decomposition in the first place? The decomposition of the loss into bias and variance helps us understand learning algorithms, as these concepts are correlated to underfitting and overfitting.\n", + "\n", + "\n", + "To use the more formal terms for bias and variance, assume we have a point estimator $\\hat{\\theta}$ of some parameter or function $\\theta$. Then, the bias is commonly defined as the difference between the expected value of the estimator and the parameter that we want to estimate:\n", + "\n", + "\n", + "$$\n", + "\\text{Bias} = E[\\hat{\\theta}] - \\theta.\n", + "$$\n", + "\n", + "If the bias is larger than zero, we also say that the estimator is positively biased, if the bias is smaller than zero, the estimator is negatively biased, and if the bias is exactly zero, the estimator is unbiased. 
+    "If the bias is larger than zero, we also say that the estimator is positively biased; if the bias is smaller than zero, the estimator is negatively biased; and if the bias is exactly zero, the estimator is unbiased. Similarly, we define the variance as the difference between the expected value of the squared estimator and the squared expectation of the estimator:\n",
+    "\n",
+    "\n",
+    "$$\n",
+    "\text{Var}(\hat{\theta}) = E\big[\hat{\theta}^2\big] - \bigg(E\big[\hat{\theta}\big]\bigg)^2.\n",
+    "$$\n",
+    "\n",
+    "Note that in the context of this lecture, it will be more convenient to write the variance in its alternative form:\n",
+    "\n",
+    "$$\n",
+    "\text{Var}(\hat{\theta}) = E[(E[{\hat{\theta}}] - \hat{\theta})^2].\n",
+    "$$\n",
+    "\n",
+    "To illustrate the concept further in the context of machine learning ...\n",
+    "\n",
+    "Suppose there is an unknown target function, or \"true function,\" that we want to approximate. Now, suppose we have different training sets drawn from an unknown distribution defined as \"true function + noise.\" The following plot shows different linear regression models, each fit to a different training set. None of these hypotheses approximate the true function well, except at two points (around x=-10 and x=6). Here, we can say that the bias is large because the difference between the true value and the predicted value, on average (here, average means \"expectation over training sets,\" not \"expectation over examples in the training set\"), is large:\n",
+    "\n",
+    "\n",
+    "![](bias_variance_decomp_files/high-bias-plot.png)\n",
+    "\n",
+    "The next plot shows different unpruned decision tree models, each fit to a different training set. Note that these hypotheses fit the training data very closely. However, if we consider the expectation over training sets, the average hypothesis would fit the true function perfectly (given that the noise is unbiased and has an expected value of 0). As we can see, the variance is very large, since on average, a prediction differs a lot from the expected value of the prediction:\n",
+    "\n",
+    "\n",
+    "![](bias_variance_decomp_files/varianceplot.png)\n",
+    "\n",
+    "\n",
+    "### Bias-Variance Decomposition of the Squared Loss\n",
+    "\n",
+    "\n",
+    "We can decompose a loss function such as the squared loss into three terms: a variance, a bias, and a noise term (and the same is true for the decomposition of the 0-1 loss later). However, for simplicity, we will ignore the noise term.\n",
+    "\n",
+    "Before we introduce the bias-variance decomposition of the 0-1 loss for classification, let us start with the decomposition of the squared loss as an easy warm-up exercise to get familiar with the overall concept.\n",
+    "\n",
+    "The previous section already listed the common formal definitions of bias and variance; however, let us state them again for convenience:\n",
+    "\n",
+    "\n",
+    "\n",
+    "$$\n",
+    "\text{Bias}(\hat{\theta}) = E[\hat{\theta}] - \theta, \quad \text{Var}(\hat{\theta}) = E[(E[{\hat{\theta}}] - \hat{\theta})^2].\n",
+    "$$\n",
+    "\n",
+    "Recall that in the context of these machine learning lecture notes, we defined \n",
+    "\n",
+    "- the true or target function as $y = f(x)$,\n",
+    "- the predicted target value as $\hat{y} = \hat{f}(x) = h(x)$,\n",
+    "- and the squared loss as $S = (y - \hat{y})^2$. (I use $S$ here because it will be easier to tell it apart from the $E$, which we use for the *expectation* in this lecture.)\n",
+    "\n",
+    "**Note that unless noted otherwise, the expectation is over training sets!**\n",
+    "\n",
+    "To get started with the squared error loss decomposition into bias and variance, let us do some algebraic manipulation, i.e., adding and subtracting the expected value of $\hat{y}$ and then expanding the expression using the quadratic formula $(a+b)^2 = a^2 + b^2 + 2ab$:\n",
+    "\n",
+    "$$\n",
+    "\begin{equation}\n",
+    "\begin{split}\n",
+    "S &= (y - \hat{y})^2 \\\n",
+    "(y - \hat{y})^2 &= (y - E[{\hat{y}}] + E[{\hat{y}}] - \hat{y})^2 \\\n",
+    "&= (y-E[{\hat{y}}])^2 + (E[{\hat{y}}] - \hat{y})^2 + 2(y - E[\hat{y}])(E[\hat{y}] - \hat{y}). \n",
+    "\end{split}\n",
+    "\end{equation}\n",
+    "$$\n",
+    "\n",
+    "Next, we just take the expectation on both sides, and we are already done:\n",
+    "\n",
+    "\n",
+    "$$\n",
+    "\begin{align}\n",
+    "E[S] &= E[(y - \hat{y})^2] \\\n",
+    "E[(y - \hat{y})^2]\n",
+    "&= (y-E[{\hat{y}}])^2 + E[(E[{\hat{y}}] - \hat{y})^2]\\\n",
+    "&= \text{[Bias]}^2 + \text{Variance}.\n",
+    "\end{align}\n",
+    "$$\n",
+    "\n",
+    "You may wonder what happened to the \"$2ab$\" term ($2(y - E[\hat{y}])(E[\hat{y}] - \hat{y})$) when we took the expectation. It turns out that it evaluates to zero and hence vanishes from the equation, which can be shown as follows:\n",
+    "\n",
+    "$$\n",
+    "\begin{align}\n",
+    "E[2(y - E[{\hat{y}}])(E[{\hat{y}}] - \hat{y})] &= 2 E[(y - E[{\hat{y}}])(E[{\hat{y}}] - \hat{y})] \\\n",
+    "&= 2(y - E[{\hat{y}}])E[(E[{\hat{y}}] - \hat{y})] \\\n",
+    "&= 2(y - E[{\hat{y}}])(E[E[{\hat{y}}]] - E[\hat{y}])\\\n",
+    "&= 2(y - E[{\hat{y}}])(E[{\hat{y}}] - E[{\hat{y}}]) \\\n",
+    "&= 0.\n",
+    "\end{align}\n",
+    "$$\n",
+    "\n",
+    "(In the second step, we pulled $y - E[{\hat{y}}]$ out of the expectation, since it is a constant with respect to the training sets.)\n",
+    "\n",
+    "So, this is the canonical decomposition of the squared error loss into bias and variance. The next section will discuss some approaches that have been made to decompose the 0-1 loss that we commonly use for classification accuracy or error.\n",
+    "\n",
+    "\n",
+    "\n",
+    "The following figure is a sketch of variance and bias in relation to the training error and generalization error -- how high variance relates to overfitting, and how large bias relates to underfitting:\n",
+    "\n",
+    "![](bias_variance_decomp_files/image-20181029010428686.png)\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "### Bias-Variance Decomposition of the 0-1 Loss\n",
+    "\n",
+    "\n",
+    "\n",
+    "Note that decomposing the 0-1 loss into bias and variance components is not as straightforward as for the squared error loss. To quote Pedro Domingos, a well-known machine learning researcher and professor at the University of Washington: \n",
+    "\n",
+    "> \"several authors have proposed bias-variance decompositions related to zero-one loss (Kong & Dietterich, 1995; Breiman, 1996b; Kohavi & Wolpert, 1996; Tibshirani, 1996; Friedman, 1997). However, each of these decompositions has significant shortcomings.\" [1] \n",
+    "\n",
+    "\n",
+    "In fact, the paper this quote was taken from may offer the most intuitive and general formulation at this point. However, we will first go over the Kong & Dietterich formulation [2] of the 0-1 loss decomposition, which is the same as Domingos's but excludes the noise term, for simplicity. \n",
+    "\n",
+    "The table below summarizes the relevant terms we used for the squared loss in relation to the 0-1 loss. Recall that the 0-1 loss, $L$, is 0 if a class label is predicted correctly, and 1 otherwise. The main prediction for the squared error loss is simply the average over the predictions, $E[\hat{y}]$ (the expectation is over training sets); for the 0-1 loss, Kong & Dietterich and Domingos defined it as the mode. I.e., if a model predicts the label 1 more than 50% of the time (considering all possible training sets), then the main prediction is 1, and 0 otherwise.\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "| - | Squared Loss | 0-1 Loss |\n",
+    "|------------------------------|---------------------------------|-----------------------------|\n",
+    "| Single loss | $(y - \hat{y})^2$ | $L(y, \hat{y})$ |\n",
+    "| Expected loss | $E[(y - \hat{y})^2]$ | $E[L(y, \hat{y})]$ |\n",
+    "| Main prediction $E[\hat{y}]$ | mean (average) | mode |\n",
+    "| Bias$^2$ | $(y-E[{\hat{y}}])^2$ | $L(y, E[\hat{y}])$ |\n",
+    "| Variance | $E[(E[{\hat{y}}] - \hat{y})^2]$ | $E[L(\hat{y}, E[\hat{y}])]$ |\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "Hence, as a result of using the mode to define the main prediction of the 0-1 loss, the bias is 1 if the main prediction does not agree with the true label $y$, and 0 otherwise:\n",
+    "\n",
+    "\n",
+    "$$\n",
+    "Bias = \begin{cases}\n",
+    "1 \text{ if } y \neq E[{\hat{y}}], \\\n",
+    "0 \text{ otherwise}.\n",
+    "\end{cases}\n",
+    "$$\n",
+    "\n",
+    "The variance of the 0-1 loss is defined as the probability that the predicted label does not match the main prediction: \n",
+    "\n",
+    "$$\n",
+    "Variance = P(\hat{y} \neq E[\hat{y}]).\n",
+    "$$\n",
+    "\n",
+    "Next, let us take a look at what happens to the loss if the bias is 0. Given the general definition of the loss, loss = bias + variance, if the bias is 0, the loss is equal to the variance: \n",
+    "\n",
+    "\n",
+    "$$\n",
+    "Loss = 0 + Variance = P(\hat{y} \neq y) = P(\hat{y} \neq E[\hat{y}]).\n",
+    "$$\n",
+    "\n",
+    "In other words, if a model has zero bias, its loss is entirely defined by the variance, which is intuitive if we think of variance as being proportional to overfitting.\n",
+    "\n",
+    "The more surprising scenario is if the bias is equal to 1. If the bias is equal to 1, as explained by Pedro Domingos, increasing the variance can decrease the loss, which is an interesting observation. This can be seen by first rewriting the 0-1 loss function as \n",
+    "\n",
+    "$$\n",
+    "Loss = P(\hat{y} \neq y) = 1 - P(\hat{y} = y).\n",
+    "$$\n",
+    "\n",
+    "(Note that we have not done anything new, yet.) Now, if we look at the previous equation for the bias, if the bias is 1, we have $y \neq E[{\hat{y}}]$. If $y$ is not equal to the main prediction, then (assuming a binary prediction problem) $\hat{y}$ is equal to $y$ exactly when $\hat{y}$ is not equal to the main prediction, i.e., $P(\hat{y} = y) = P(\hat{y} \neq E[{\hat{y}}])$. Using the \"inverse\" (\"1 minus\"), we can then write the loss as\n",
+    "\n",
+    "\n",
+    "$$\n",
+    "Loss = P(\hat{y} \neq y) = 1 - P(\hat{y} = y) = 1 - P(\hat{y} \neq E[{\hat{y}}]).\n",
+    "$$\n",
+    "\n",
+    "Since the bias is 1, the loss is hence defined as \"loss = bias - variance\" (or \"loss = 1 - variance\"). This might be quite unintuitive at first, but the explanation Kong, Dietterich, and Domingos offer is that if a model has a very high bias such that its main prediction is always wrong, increasing the variance can be beneficial, since increasing the variance would push the decision boundary, which might then lead to some correct predictions just by chance. In other words, for scenarios with high bias, increasing the variance can improve (decrease) the loss!\n",
+    "\n",
+    "### References\n",
+    "\n",
+    "- [1] Domingos, Pedro. \"A unified bias-variance decomposition.\" Proceedings of the 17th International Conference on Machine Learning. 2000.\n",
+    "- [2] Dietterich, Thomas G., and Eun Bae Kong. Machine learning bias, statistical bias, and statistical variance of decision tree algorithms. Technical report, Department of Computer Science, Oregon State University, 1995."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Example 1 -- Bias Variance Decomposition of a Decision Tree Classifier"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Average expected loss: 0.062\n",
+      "Average bias: 0.022\n",
+      "Average variance: 0.040\n"
+     ]
+    }
+   ],
+   "source": [
+    "from mlxtend.evaluate import bias_variance_decomp\n",
+    "from sklearn.tree import DecisionTreeClassifier\n",
+    "from mlxtend.data import iris_data\n",
+    "from sklearn.model_selection import train_test_split\n",
+    "\n",
+    "\n",
+    "X, y = iris_data()\n",
+    "X_train, X_test, y_train, y_test = train_test_split(X, y,\n",
+    "                                                    test_size=0.3,\n",
+    "                                                    random_state=123,\n",
+    "                                                    shuffle=True,\n",
+    "                                                    stratify=y)\n",
+    "\n",
+    "\n",
+    "\n",
+    "tree = DecisionTreeClassifier(random_state=123)\n",
+    "\n",
+    "avg_expected_loss, avg_bias, avg_var = bias_variance_decomp(\n",
+    "        tree, X_train, y_train, X_test, y_test, \n",
+    "        loss='0-1_loss',\n",
+    "        random_seed=123)\n",
+    "\n",
+    "print('Average expected loss: %.3f' % avg_expected_loss)\n",
+    "print('Average bias: %.3f' % avg_bias)\n",
+    "print('Average variance: %.3f' % avg_var)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For comparison, the bias-variance decomposition of a bagging classifier is shown below, which should intuitively have a lower variance than a single decision tree:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Average expected loss: 0.048\n",
+      "Average bias: 0.022\n",
+      "Average variance: 0.026\n"
+     ]
+    }
+   ],
+   "source": [
+    "from sklearn.ensemble import BaggingClassifier\n",
+    "\n",
+    "tree = DecisionTreeClassifier(random_state=123)\n",
+    "bag = BaggingClassifier(base_estimator=tree,\n",
+    "                        n_estimators=100,\n",
+    "                        random_state=123)\n",
+    "\n",
+    "avg_expected_loss, avg_bias, avg_var = bias_variance_decomp(\n",
+    "        bag, X_train, y_train, X_test, y_test, \n",
+    "        loss='0-1_loss',\n",
+    "        random_seed=123)\n",
+    "\n",
+    "print('Average expected loss: %.3f' % avg_expected_loss)\n",
+    "print('Average bias: %.3f' % avg_bias)\n",
+    "print('Average variance: %.3f' % avg_var)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Example 2 -- Bias Variance Decomposition of a Decision Tree Regressor"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Average expected loss: 31.917\n",
+      "Average bias: 13.814\n",
+      "Average variance: 18.102\n"
+     ]
+    }
+   ],
+   "source": [
+    "from mlxtend.evaluate import bias_variance_decomp\n",
+    "from sklearn.tree import DecisionTreeRegressor\n",
+    "from mlxtend.data import boston_housing_data\n",
+    "from sklearn.model_selection import train_test_split\n",
+    "\n",
+    "\n",
+    "X, y = boston_housing_data()\n",
"X_train, X_test, y_train, y_test = train_test_split(X, y,\n", + " test_size=0.3,\n", + " random_state=123,\n", + " shuffle=True)\n", + "\n", + "\n", + "\n", + "tree = DecisionTreeRegressor(random_state=123)\n", + "\n", + "avg_expected_loss, avg_bias, avg_var = bias_variance_decomp(\n", + " tree, X_train, y_train, X_test, y_test, \n", + " loss='mse',\n", + " random_seed=123)\n", + "\n", + "print('Average expected loss: %.3f' % avg_expected_loss)\n", + "print('Average bias: %.3f' % avg_bias)\n", + "print('Average variance: %.3f' % avg_var)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For comparison, the bias-variance decomposition of a bagging regressor is shown below, which should intuitively have a lower variance than a single decision tree:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Average expected loss: 18.593\n", + "Average bias: 15.354\n", + "Average variance: 3.239\n" + ] + } + ], + "source": [ + "from sklearn.ensemble import BaggingRegressor\n", + "\n", + "tree = DecisionTreeRegressor(random_state=123)\n", + "bag = BaggingRegressor(base_estimator=tree,\n", + " n_estimators=100,\n", + " random_state=123)\n", + "\n", + "avg_expected_loss, avg_bias, avg_var = bias_variance_decomp(\n", + " bag, X_train, y_train, X_test, y_test, \n", + " loss='mse',\n", + " random_seed=123)\n", + "\n", + "print('Average expected loss: %.3f' % avg_expected_loss)\n", + "print('Average bias: %.3f' % avg_bias)\n", + "print('Average variance: %.3f' % avg_var)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "## bias_variance_decomp\n", + "\n", + "*bias_variance_decomp(estimator, X_train, y_train, X_test, y_test, loss='0-1_loss', num_rounds=200, random_seed=None)*\n", + "\n", + "estimator : object\n", + "A classifier or regressor object or class implementing a `fit`\n", + "`predict` method similar to the scikit-learn API.\n", + "\n", + "\n", + "- `X_train` : array-like, shape=(num_examples, num_features)\n", + "\n", + " A training dataset for drawing the bootstrap samples to carry\n", + " out the bias-variance decomposition.\n", + "\n", + "\n", + "- `y_train` : array-like, shape=(num_examples)\n", + "\n", + " Targets (class labels, continuous values in case of regression)\n", + " associated with the `X_train` examples.\n", + "\n", + "\n", + "- `X_test` : array-like, shape=(num_examples, num_features)\n", + "\n", + " The test dataset for computing the average loss, bias,\n", + " and variance.\n", + "\n", + "\n", + "- `y_test` : array-like, shape=(num_examples)\n", + "\n", + " Targets (class labels, continuous values in case of regression)\n", + " associated with the `X_test` examples.\n", + "\n", + "\n", + "- `loss` : str (default='0-1_loss')\n", + "\n", + " Loss function for performing the bias-variance decomposition.\n", + " Currently allowed values are '0-1_loss' and 'mse'.\n", + "\n", + "\n", + "- `num_rounds` : int (default=200)\n", + "\n", + " Number of bootstrap rounds for performing the bias-variance\n", + " decomposition.\n", + "\n", + "\n", + "- `random_seed` : int (default=None)\n", + "\n", + " Random seed for the bootstrap sampling used for the\n", + " bias-variance decomposition.\n", + "\n", + "**Returns**\n", + "\n", + "- `avg_expected_loss, avg_bias, avg_var` : 
+      "- `avg_expected_loss, avg_bias, avg_var` : returns the average expected\n",
+      "\n",
+      "    loss, average bias, and average variance (all floats), where the average\n",
+      "    is computed over the data points in the test set.\n",
+      "\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "with open('../../api_modules/mlxtend.evaluate/bias_variance_decomp.md', 'r') as f:\n",
+    "    s = f.read() \n",
+    "print(s)"
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.5"
+  },
+  "toc": {
+   "nav_menu": {},
+   "number_sections": true,
+   "sideBar": true,
+   "skip_h1_title": false,
+   "title_cell": "Table of Contents",
+   "title_sidebar": "Contents",
+   "toc_cell": false,
+   "toc_position": {},
+   "toc_section_display": true,
+   "toc_window_display": false
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/docs/_site/sources/user_guide/evaluate/bias_variance_decomp.md b/docs/_site/sources/user_guide/evaluate/bias_variance_decomp.md
new file mode 100644
index 000000000..a57819f89
--- /dev/null
+++ b/docs/_site/sources/user_guide/evaluate/bias_variance_decomp.md
@@ -0,0 +1,371 @@
+# Bias-Variance Decomposition
+
+Bias-variance decomposition of machine learning algorithms for various loss functions.
+
+> `from mlxtend.evaluate import bias_variance_decomp`
+
+## Overview
+
+Often, researchers use the terms *bias* and *variance* or "bias-variance tradeoff" to describe the performance of a model -- i.e., you may stumble upon talks, books, or articles where people say that a model has a high variance or high bias. So, what does that mean? In general, we might say that "high variance" is proportional to overfitting, and "high bias" is proportional to underfitting.
+
+Anyway, why are we attempting this bias-variance decomposition in the first place? The decomposition of the loss into bias and variance helps us understand learning algorithms, as these concepts are correlated to underfitting and overfitting.
+
+
+To use the more formal terms for bias and variance, assume we have a point estimator $\hat{\theta}$ of some parameter or function $\theta$. Then, the bias is commonly defined as the difference between the expected value of the estimator and the parameter that we want to estimate:
+
+
+$$
+\text{Bias} = E[\hat{\theta}] - \theta.
+$$
+
+If the bias is larger than zero, we also say that the estimator is positively biased; if the bias is smaller than zero, the estimator is negatively biased; and if the bias is exactly zero, the estimator is unbiased. Similarly, we define the variance as the difference between the expected value of the squared estimator and the squared expectation of the estimator:
+
+
+$$
+\text{Var}(\hat{\theta}) = E\big[\hat{\theta}^2\big] - \bigg(E\big[\hat{\theta}\big]\bigg)^2.
+$$
+
+Note that in the context of this lecture, it will be more convenient to write the variance in its alternative form:
+
+$$
+\text{Var}(\hat{\theta}) = E[(E[{\hat{\theta}}] - \hat{\theta})^2].
+$$
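+
+To make these definitions more tangible, below is a minimal simulation sketch (not part of mlxtend; the setup and names are made up for illustration) that estimates the bias and variance of the maximum likelihood variance estimator $\hat{\theta} = \frac{1}{n}\sum_{i}(x_i - \bar{x})^2$ by averaging over many simulated training sets drawn from $N(0, 1)$, where the true parameter is $\theta = \sigma^2 = 1$:
+
+```python
+import numpy as np
+
+rng = np.random.RandomState(123)
+theta = 1.0                # true variance of the data-generating distribution
+n, num_datasets = 20, 100000
+
+# one estimate theta_hat per simulated training set (ddof=0 -> MLE estimator)
+estimates = np.var(rng.normal(loc=0.0, scale=1.0, size=(num_datasets, n)),
+                   axis=1, ddof=0)
+
+bias = np.mean(estimates) - theta                        # E[theta_hat] - theta
+variance = np.mean((np.mean(estimates) - estimates)**2)  # alternative form
+
+print('Bias: %.3f (theory: %.3f)' % (bias, -theta/n))
+print('Variance: %.3f' % variance)
+```
+
+For $n = 20$, the estimated bias should come out close to the theoretical value $-\sigma^2/n = -0.05$, i.e., this estimator is negatively biased.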
+
+To illustrate the concept further in the context of machine learning ...
+
+Suppose there is an unknown target function, or "true function," that we want to approximate. Now, suppose we have different training sets drawn from an unknown distribution defined as "true function + noise." The following plot shows different linear regression models, each fit to a different training set. None of these hypotheses approximate the true function well, except at two points (around x=-10 and x=6). Here, we can say that the bias is large because the difference between the true value and the predicted value, on average (here, average means "expectation over training sets," not "expectation over examples in the training set"), is large:
+
+
+![](bias_variance_decomp_files/high-bias-plot.png)
+
+The next plot shows different unpruned decision tree models, each fit to a different training set. Note that these hypotheses fit the training data very closely. However, if we consider the expectation over training sets, the average hypothesis would fit the true function perfectly (given that the noise is unbiased and has an expected value of 0). As we can see, the variance is very large, since on average, a prediction differs a lot from the expected value of the prediction:
+
+
+![](bias_variance_decomp_files/varianceplot.png)
+
+
+### Bias-Variance Decomposition of the Squared Loss
+
+
+We can decompose a loss function such as the squared loss into three terms: a variance, a bias, and a noise term (and the same is true for the decomposition of the 0-1 loss later). However, for simplicity, we will ignore the noise term.
+
+Before we introduce the bias-variance decomposition of the 0-1 loss for classification, let us start with the decomposition of the squared loss as an easy warm-up exercise to get familiar with the overall concept.
+
+The previous section already listed the common formal definitions of bias and variance; however, let us state them again for convenience:
+
+
+
+$$
+\text{Bias}(\hat{\theta}) = E[\hat{\theta}] - \theta, \quad \text{Var}(\hat{\theta}) = E[(E[{\hat{\theta}}] - \hat{\theta})^2].
+$$
+
+Recall that in the context of these machine learning lecture notes, we defined
+
+- the true or target function as $y = f(x)$,
+- the predicted target value as $\hat{y} = \hat{f}(x) = h(x)$,
+- and the squared loss as $S = (y - \hat{y})^2$. (I use $S$ here because it will be easier to tell it apart from the $E$, which we use for the *expectation* in this lecture.)
+
+**Note that unless noted otherwise, the expectation is over training sets!**
+
+To get started with the squared error loss decomposition into bias and variance, let us do some algebraic manipulation, i.e., adding and subtracting the expected value of $\hat{y}$ and then expanding the expression using the quadratic formula $(a+b)^2 = a^2 + b^2 + 2ab$:
+
+$$
+\begin{equation}
+\begin{split}
+S &= (y - \hat{y})^2 \\
+(y - \hat{y})^2 &= (y - E[{\hat{y}}] + E[{\hat{y}}] - \hat{y})^2 \\
+&= (y-E[{\hat{y}}])^2 + (E[{\hat{y}}] - \hat{y})^2 + 2(y - E[\hat{y}])(E[\hat{y}] - \hat{y}).
+\end{split}
+\end{equation}
+$$
+
+Next, we just take the expectation on both sides, and we are already done:
+
+
+$$
+\begin{align}
+E[S] &= E[(y - \hat{y})^2] \\
+E[(y - \hat{y})^2]
+&= (y-E[{\hat{y}}])^2 + E[(E[{\hat{y}}] - \hat{y})^2]\\
+&= \text{[Bias]}^2 + \text{Variance}.
+\end{align}
+$$
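+
+We can verify this identity numerically. The following sketch (illustrative only; the true function, noise level, and all names are made up) simulates many training sets for a fixed test point, fits a linear model to each, and checks that the average squared error at the test point equals the squared bias plus the variance:
+
+```python
+import numpy as np
+
+rng = np.random.RandomState(123)
+
+def f(x):                        # the (usually unknown) "true function"
+    return 0.5*x + 1.0
+
+x_test, y_test = 3.0, f(3.0)     # fixed test point with noise-free target
+
+preds = []
+for _ in range(10000):           # each iteration simulates one training set
+    x_train = rng.uniform(-5, 5, size=20)
+    y_train = f(x_train) + rng.normal(scale=2.0, size=20)
+    slope, intercept = np.polyfit(x_train, y_train, deg=1)
+    preds.append(slope*x_test + intercept)
+preds = np.asarray(preds)
+
+expected_loss = np.mean((y_test - preds)**2)        # E[(y - y_hat)^2]
+bias_sq = (y_test - np.mean(preds))**2              # (y - E[y_hat])^2
+variance = np.mean((np.mean(preds) - preds)**2)     # E[(E[y_hat] - y_hat)^2]
+
+print('Expected loss: %.5f' % expected_loss)
+print('Bias^2 + Variance: %.5f' % (bias_sq + variance))
+```
+
+Both printed values agree up to floating-point error, since the decomposition is an algebraic identity that also holds for the empirical distribution of the predictions.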
+
+You may wonder what happened to the "$2ab$" term ($2(y - E[\hat{y}])(E[\hat{y}] - \hat{y})$) when we took the expectation. It turns out that it evaluates to zero and hence vanishes from the equation, which can be shown as follows:
+
+$$
+\begin{align}
+E[2(y - E[{\hat{y}}])(E[{\hat{y}}] - \hat{y})] &= 2 E[(y - E[{\hat{y}}])(E[{\hat{y}}] - \hat{y})] \\
+&= 2(y - E[{\hat{y}}])E[(E[{\hat{y}}] - \hat{y})] \\
+&= 2(y - E[{\hat{y}}])(E[E[{\hat{y}}]] - E[\hat{y}])\\
+&= 2(y - E[{\hat{y}}])(E[{\hat{y}}] - E[{\hat{y}}]) \\
+&= 0.
+\end{align}
+$$
+
+(In the second step, we pulled $y - E[{\hat{y}}]$ out of the expectation, since it is a constant with respect to the training sets.)
+
+So, this is the canonical decomposition of the squared error loss into bias and variance. The next section will discuss some approaches that have been made to decompose the 0-1 loss that we commonly use for classification accuracy or error.
+
+
+
+The following figure is a sketch of variance and bias in relation to the training error and generalization error -- how high variance relates to overfitting, and how large bias relates to underfitting:
+
+![](bias_variance_decomp_files/image-20181029010428686.png)
+
+
+
+
+### Bias-Variance Decomposition of the 0-1 Loss
+
+
+
+Note that decomposing the 0-1 loss into bias and variance components is not as straightforward as for the squared error loss. To quote Pedro Domingos, a well-known machine learning researcher and professor at the University of Washington:
+
+> "several authors have proposed bias-variance decompositions related to zero-one loss (Kong & Dietterich, 1995; Breiman, 1996b; Kohavi & Wolpert, 1996; Tibshirani, 1996; Friedman, 1997). However, each of these decompositions has significant shortcomings." [1]
+
+
+In fact, the paper this quote was taken from may offer the most intuitive and general formulation at this point. However, we will first go over the Kong & Dietterich formulation [2] of the 0-1 loss decomposition, which is the same as Domingos's but excludes the noise term, for simplicity.
+
+The table below summarizes the relevant terms we used for the squared loss in relation to the 0-1 loss. Recall that the 0-1 loss, $L$, is 0 if a class label is predicted correctly, and 1 otherwise. The main prediction for the squared error loss is simply the average over the predictions, $E[\hat{y}]$ (the expectation is over training sets); for the 0-1 loss, Kong & Dietterich and Domingos defined it as the mode. I.e., if a model predicts the label 1 more than 50% of the time (considering all possible training sets), then the main prediction is 1, and 0 otherwise.
+
+
+
+
+| - | Squared Loss | 0-1 Loss |
+|------------------------------|---------------------------------|-----------------------------|
+| Single loss | $(y - \hat{y})^2$ | $L(y, \hat{y})$ |
+| Expected loss | $E[(y - \hat{y})^2]$ | $E[L(y, \hat{y})]$ |
+| Main prediction $E[\hat{y}]$ | mean (average) | mode |
+| Bias$^2$ | $(y-E[{\hat{y}}])^2$ | $L(y, E[\hat{y}])$ |
+| Variance | $E[(E[{\hat{y}}] - \hat{y})^2]$ | $E[L(\hat{y}, E[\hat{y}])]$ |
+
+
+
+
+Hence, as a result of using the mode to define the main prediction of the 0-1 loss, the bias is 1 if the main prediction does not agree with the true label $y$, and 0 otherwise:
+
+
+$$
+Bias = \begin{cases}
+1 \text{ if } y \neq E[{\hat{y}}], \\
+0 \text{ otherwise}.
+\end{cases}
+$$
+
+The variance of the 0-1 loss is defined as the probability that the predicted label does not match the main prediction:
+
+$$
+Variance = P(\hat{y} \neq E[\hat{y}]).
+$$
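+
+As a small, self-contained illustration (a made-up sketch rather than mlxtend's internal implementation), suppose we have collected the 0-1 predictions of a model for a single test example across ten training sets. The main prediction is then the mode of these predictions, and bias and variance follow directly from the definitions above:
+
+```python
+import numpy as np
+
+y_true = 1
+# one hypothetical prediction per training set for the same test example
+preds = np.array([1, 0, 1, 1, 0, 1, 1, 1, 0, 1])
+
+main_pred = np.argmax(np.bincount(preds))   # mode (majority vote) -> 1
+
+bias = int(y_true != main_pred)             # 1 if the mode disagrees with y
+variance = np.mean(preds != main_pred)      # P(y_hat != main prediction)
+avg_loss = np.mean(preds != y_true)         # E[L(y, y_hat)]
+
+print('Main prediction: %d' % main_pred)
+print('Bias: %d, Variance: %.1f, Avg loss: %.1f' % (bias, variance, avg_loss))
+```
+
+Here, the main prediction is correct, so the bias is 0 and the average loss (0.3) equals the variance -- exactly the case discussed next.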
+
+Next, let us take a look at what happens to the loss if the bias is 0. Given the general definition of the loss, loss = bias + variance, if the bias is 0, the loss is equal to the variance:
+
+
+$$
+Loss = 0 + Variance = P(\hat{y} \neq y) = P(\hat{y} \neq E[\hat{y}]).
+$$
+
+In other words, if a model has zero bias, its loss is entirely defined by the variance, which is intuitive if we think of variance as being proportional to overfitting.
+
+The more surprising scenario is if the bias is equal to 1. If the bias is equal to 1, as explained by Pedro Domingos, increasing the variance can decrease the loss, which is an interesting observation. This can be seen by first rewriting the 0-1 loss function as
+
+$$
+Loss = P(\hat{y} \neq y) = 1 - P(\hat{y} = y).
+$$
+
+(Note that we have not done anything new, yet.) Now, if we look at the previous equation for the bias, if the bias is 1, we have $y \neq E[{\hat{y}}]$. If $y$ is not equal to the main prediction, then (assuming a binary prediction problem) $\hat{y}$ is equal to $y$ exactly when $\hat{y}$ is not equal to the main prediction, i.e., $P(\hat{y} = y) = P(\hat{y} \neq E[{\hat{y}}])$. Using the "inverse" ("1 minus"), we can then write the loss as
+
+
+$$
+Loss = P(\hat{y} \neq y) = 1 - P(\hat{y} = y) = 1 - P(\hat{y} \neq E[{\hat{y}}]).
+$$
+
+Since the bias is 1, the loss is hence defined as "loss = bias - variance" (or "loss = 1 - variance"). This might be quite unintuitive at first, but the explanation Kong, Dietterich, and Domingos offer is that if a model has a very high bias such that its main prediction is always wrong, increasing the variance can be beneficial, since increasing the variance would push the decision boundary, which might then lead to some correct predictions just by chance. In other words, for scenarios with high bias, increasing the variance can improve (decrease) the loss!
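+
+We can see the bias = 1 case numerically as well. In the following made-up sketch (same conventions as the previous one), the main prediction is always wrong, and the average 0-1 loss works out to bias minus variance:
+
+```python
+import numpy as np
+
+y_true = 1
+# one hypothetical prediction per training set; the mode is the wrong label 0
+preds = np.array([0, 0, 0, 1, 0, 0, 0, 1, 0, 0])
+
+main_pred = np.argmax(np.bincount(preds))   # -> 0, i.e., the wrong label
+bias = int(y_true != main_pred)             # -> 1
+variance = np.mean(preds != main_pred)      # P(y_hat != main) = 0.2
+avg_loss = np.mean(preds != y_true)         # P(y_hat != y) = 0.8
+
+print('Bias: %d, Variance: %.1f' % (bias, variance))
+print('Avg loss: %.1f = bias - variance = %.1f' % (avg_loss, bias - variance))
+```
+
+Flipping more of the predictions away from the (wrong) main prediction -- i.e., increasing the variance -- would decrease the loss here, as described above.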
+
+### References
+
+- [1] Domingos, Pedro. "A unified bias-variance decomposition." Proceedings of the 17th International Conference on Machine Learning. 2000.
+- [2] Dietterich, Thomas G., and Eun Bae Kong. Machine learning bias, statistical bias, and statistical variance of decision tree algorithms. Technical report, Department of Computer Science, Oregon State University, 1995.
+
+## Example 1 -- Bias Variance Decomposition of a Decision Tree Classifier
+
+
+```python
+from mlxtend.evaluate import bias_variance_decomp
+from sklearn.tree import DecisionTreeClassifier
+from mlxtend.data import iris_data
+from sklearn.model_selection import train_test_split
+
+
+X, y = iris_data()
+X_train, X_test, y_train, y_test = train_test_split(X, y,
+                                                    test_size=0.3,
+                                                    random_state=123,
+                                                    shuffle=True,
+                                                    stratify=y)
+
+
+
+tree = DecisionTreeClassifier(random_state=123)
+
+avg_expected_loss, avg_bias, avg_var = bias_variance_decomp(
+        tree, X_train, y_train, X_test, y_test,
+        loss='0-1_loss',
+        random_seed=123)
+
+print('Average expected loss: %.3f' % avg_expected_loss)
+print('Average bias: %.3f' % avg_bias)
+print('Average variance: %.3f' % avg_var)
+```
+
+    Average expected loss: 0.062
+    Average bias: 0.022
+    Average variance: 0.040
+
+
+For comparison, the bias-variance decomposition of a bagging classifier is shown below, which should intuitively have a lower variance than a single decision tree:
+
+
+```python
+from sklearn.ensemble import BaggingClassifier
+
+tree = DecisionTreeClassifier(random_state=123)
+bag = BaggingClassifier(base_estimator=tree,
+                        n_estimators=100,
+                        random_state=123)
+
+avg_expected_loss, avg_bias, avg_var = bias_variance_decomp(
+        bag, X_train, y_train, X_test, y_test,
+        loss='0-1_loss',
+        random_seed=123)
+
+print('Average expected loss: %.3f' % avg_expected_loss)
+print('Average bias: %.3f' % avg_bias)
+print('Average variance: %.3f' % avg_var)
+```
+
+    Average expected loss: 0.048
+    Average bias: 0.022
+    Average variance: 0.026
+
+
+## Example 2 -- Bias Variance Decomposition of a Decision Tree Regressor
+
+
+```python
+from mlxtend.evaluate import bias_variance_decomp
+from sklearn.tree import DecisionTreeRegressor
+from mlxtend.data import boston_housing_data
+from sklearn.model_selection import train_test_split
+
+
+X, y = boston_housing_data()
+X_train, X_test, y_train, y_test = train_test_split(X, y,
+                                                    test_size=0.3,
+                                                    random_state=123,
+                                                    shuffle=True)
+
+
+
+tree = DecisionTreeRegressor(random_state=123)
+
+avg_expected_loss, avg_bias, avg_var = bias_variance_decomp(
+        tree, X_train, y_train, X_test, y_test,
+        loss='mse',
+        random_seed=123)
+
+print('Average expected loss: %.3f' % avg_expected_loss)
+print('Average bias: %.3f' % avg_bias)
+print('Average variance: %.3f' % avg_var)
+```
+
+    Average expected loss: 31.917
+    Average bias: 13.814
+    Average variance: 18.102
+
+
+For comparison, the bias-variance decomposition of a bagging regressor is shown below, which should intuitively have a lower variance than a single decision tree:
+
+
+```python
+from sklearn.ensemble import BaggingRegressor
+
+tree = DecisionTreeRegressor(random_state=123)
+bag = BaggingRegressor(base_estimator=tree,
+                       n_estimators=100,
+                       random_state=123)
+
+avg_expected_loss, avg_bias, avg_var = bias_variance_decomp(
+        bag, X_train, y_train, X_test, y_test,
+        loss='mse',
+        random_seed=123)
+
+print('Average expected loss: %.3f' % avg_expected_loss)
+print('Average bias: %.3f' % avg_bias)
+print('Average variance: %.3f' % avg_var)
+```
+
+    Average expected loss: 18.593
+    Average bias: 15.354
+    Average variance: 3.239
+
+
+## API
+
+
+*bias_variance_decomp(estimator, X_train, y_train, X_test, y_test, loss='0-1_loss', num_rounds=200, random_seed=None)*
+
+**Parameters**
+
+- `estimator` : object
+
+    A classifier or regressor object or class implementing both a `fit`
+    and a `predict` method, similar to the scikit-learn API.
+
+
+- `X_train` : array-like, shape=(num_examples, num_features)
+
+    A training dataset for drawing the bootstrap samples to carry
+    out the bias-variance decomposition.
+
+
+- `y_train` : array-like, shape=(num_examples)
+
+    Targets (class labels, continuous values in case of regression)
+    associated with the `X_train` examples.
+
+
+- `X_test` : array-like, shape=(num_examples, num_features)
+
+    The test dataset for computing the average loss, bias,
+    and variance.
+
+
+- `y_test` : array-like, shape=(num_examples)
+
+    Targets (class labels, continuous values in case of regression)
+    associated with the `X_test` examples.
+
+
+- `loss` : str (default='0-1_loss')
+
+    Loss function for performing the bias-variance decomposition.
+    Currently allowed values are '0-1_loss' and 'mse'.
+
+
+- `num_rounds` : int (default=200)
+
+    Number of bootstrap rounds for performing the bias-variance
+    decomposition.
+
+
+- `random_seed` : int (default=None)
+
+    Random seed for the bootstrap sampling used for the
+    bias-variance decomposition.
+
+**Returns**
+
+- `avg_expected_loss, avg_bias, avg_var` : floats
+
+    The average expected loss, the average bias, and the average
+    variance, where the average is computed over the data points
+    in the test set.
+
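+Under the hood, `bias_variance_decomp` repeatedly refits the estimator on bootstrap samples of the training set. The following is a rough sketch of that protocol for the 'mse' loss (a simplified illustration, not mlxtend's actual implementation; the function name `mse_bias_variance_sketch` is made up):
+
+
+```python
+import numpy as np
+
+
+def mse_bias_variance_sketch(estimator, X_train, y_train, X_test, y_test,
+                             num_rounds=200, random_seed=None):
+    rng = np.random.RandomState(random_seed)
+    all_pred = np.zeros((num_rounds, y_test.shape[0]))
+
+    for i in range(num_rounds):
+        # draw a bootstrap sample (sampling with replacement) of the training set
+        idx = rng.randint(0, X_train.shape[0], size=X_train.shape[0])
+        estimator.fit(X_train[idx], y_train[idx])
+        all_pred[i] = estimator.predict(X_test)
+
+    # main prediction: the average prediction for each test example
+    main_pred = np.mean(all_pred, axis=0)
+
+    avg_expected_loss = np.mean((all_pred - y_test)**2)  # avg. squared error
+    avg_bias = np.mean((main_pred - y_test)**2)          # avg. squared bias
+    avg_var = np.mean((all_pred - main_pred)**2)         # avg. variance
+
+    return avg_expected_loss, avg_bias, avg_var
+```
+
+For the squared error, the identity avg_expected_loss = avg_bias + avg_var holds exactly, which can be checked against the numbers from Example 2 above (13.814 + 18.102 ≈ 31.917).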
diff --git a/docs/_site/sources/user_guide/evaluate/bias_variance_decomp_files/high-bias-plot.png b/docs/_site/sources/user_guide/evaluate/bias_variance_decomp_files/high-bias-plot.png
new file mode 100644
index 0000000000000000000000000000000000000000..04829e9f10d615119a06e2364e153eb2e0755634
Binary files /dev/null and b/docs/_site/sources/user_guide/evaluate/bias_variance_decomp_files/high-bias-plot.png differ
zBl<6f&Z{qG)A1ejC)i8yAIt(M&PljCSoMdjt#d2`US>O(S2`ON=KLnP-)@9cFR3BiDfcXPLc3M_6HYQ|p+OB% z3vc#)3cyiH4&=&m0ZvUUdSO_XTSdu5g6uxiV~X11ku$ z2;ZzRhcU6RC;^XGad7t+GkjjR@SeX-;q5ep9^K3T$@faMhpVYsc_L+mRKdC9Tlx>B z!hOb`M)`)7N)&0YVAp~j2uM)l0lwpTzB>JmKHlmBH3!)~AMKtSJj)DT&j7rY*FSVZ zR5dU$Fxr>9V)TGTq^WoMCz>f+(W|Px3Z~LXb=2aDBn^#|3DZg~gQSZSb{zpSuK$K9 zaghMt%WM6g`ZsrmfE*c}fTmn>H9aEw>_Zy|I79-vP>?WEnkSd{#)b#WTi2)b4^auU zgK<5L*y#eq!#5rrhrS4Lvio9ns*TtSh0jW|A}i52S26W|sS8ytGyV>EAi|cGMPdsh z%%$zyToV-RB?*LE6}F&ixIP|#dpcG-B34z#PL27WCu9vHpt;=(y9x5M=y2b)co`SY z>KvupgxC+8n&HMmrjEsTaTlWST6D5x+R(J*lod!24FW;w=l|kw{ugJwP zI9I3)hMRyR`t%R{hfp`Y7+e^{w++yC$sS|UJr8MnJ;RSaUbx47ZIrf%Ay=&>c#WA< zlO}StKZU=yX_p^DHU53?znRk#=Q-$ODc=SdHA-X*446pIN zoWil!LNyw7u@7i}r+vQfi1_RYa##T@EF$_DnyMk#`|^mH`-2`kc(t90Kg}5|S(WQu zYhh@twcVlbTRkBIwBW z`^>dY{Q&|}u*G=K1G@h$3jY(Hf3U;G)Nu)1MzArpv-k_)iKU%16cNXdeF5L5-S$4! zEzAcb3(8FN&hHInMY5MHWXdz#KWQ7~<<#>jTbKt6*#lj&2q2uQK^7DqL{}}9|H`#{ z+@d!X{3mjs8OIXOx;nhC+l(Ll-F*e`Df`|RAvfW8?P*tw(_)uf z0>m3^su?KA|8q>nA_9#$%6b#5T}3vpbRS>Zd2mxI?oTXgnr0dg+44;0g=JNJh-fi; zUB1yGn@y5+d3mWTZdfCZb817ZXDv|Bv4GaFf8UhA*>kg9m7)?e0E=sE_y=32A1f6D z`sb%nVt~N|P=*R0dg7tx7wGI#9pU`L@08yiEnPd-u4C8dUD^&cL%0mVbm7o$xb z)TqtK0CCzsxp^kxm50#&xExlCVtn}YVwMv&6YD9~P78RPv6!=<4*^|E`A2rTaJe@u zTp9i(Pqs0$$>4kYE4=BWdgeyM~AFyyJo#4vP$_fTL%`kL>mR0Aceih~V87nmOp-+PW zfxd56+K)dVl{>gt-(G7%w|($#$&TyjefrY z2p9X{|I}^gxn2$an`3|b=mZx#cJwv^+h_5BtvSa@uGZextAQnGdU~77t8cCD8l01F zrY1ib!sc~*movE$U+dq~6JBn7SwvT;rseXe%6A{Pkb@dy;o%u${Q|QcL8yEGP2zDgNz06eURGq-!1jN8!*BoKB@Ez0GU$D?o0^q}yF?{fnpXYRn&8SI zf3>%1?XarKPX=0riDC1y6a_Ag7+R^CGHHTSDBG##kOgbh`?azY_;pt}plqHL@PTPP zD>t`vjdHzHN3eg?_>E!Jo0h|ag)9>dlzHqe*Bqbzj z4>wXPYBb95=z4wG>c;RDd6naw>xhKt5g8I0Pu)PF-A|!17+|l4z#^u(Ia+vT(F^zw z-8lVe76EvyNrd?<+D%?YM-whdi_ypS{ZEDV+e;SP6%Y~U6)mJ3>2Aie|L13|| zoFIy(@x0lmp4;0u&ke4QPz&g?!no-oK))p9|9Q9EJ7DlKY}$J_D+X%{kNCbsMmNzc4NxTulF??VV-7tt zgfO%XDSnhsrWp%QkBIcrEDqT%@Eej6Bz=)w~9YKc9_+jc8LjpI;c zVJWGRz|UNc2O0Ra_}jq%vkjy^gMd!55?)SP-I#W5x*~@Bpwrp&D8m0(K+ej6c08`V zP4o7XRii@Ag<7&W@wwGzq-5a>`RZN5{!{Muqhbz8_lYpt zzLP>^ykGHxnEx!veO!GPpoe4>74=*96mnI_?mc)Cl#!8xX1U z#z;W^@_)#ypj@d6xf6vrwf3=C;pX}Wq97n4h63=-ZT%`Y4?6UNtsL!-#&4>fWcl0Xh(fc-whllzp* ztnqVPdh`GHjRUbCg1GTb%iB>}Xo{efcF2~SCJ4L^DOq`AOGY0-Ra(uF6-RKrx=oS% z;CI{eunLx>2rx;h;NZH;A^QSAkhy)7@!O1>D` zV2XK<)?sQx2`}Gsk-98q<3*;XlUa-B^xSFYv-s^Mg3Z3EP5?PiYpyW)R3SkgA2 zAR3sGn8^E!{~p+DoL^m~z1Lop-$?qq=lX46leY+sR^XT(lY6*cp0&Y5BiL;> zU+Rv_%ge*I1gA&k^|WjE=5<$gwfUE|%z6PpZWAiX{lA>z&C7WEM#3g%2kkAtQ*KYb zfaaxun7hnUA1leCu4|YvyUY)KOm)f?`azYg*~M`@+BC>^J_p^sV8_oGq;dt5k>*#;ujv9V4tM_N7!>v_u^fyd56A^8$+&pc=#jy& z2T+dEYk&YLASErIxnmFmbw$1{msynEkpLkxqk(S9<4;ClT|*y9CVSKN{hu{PEH!8l ztvIot-WF_u&sS%%Ju@t4{D{uzd?6pDp%syUMHzk?Pn#K5yiv3yn@igAbwk8Aq;#>F zm#WXyTMBsu@nCC86?l))03#Dy^&rW&rlzLhZn^D2yPpU2R=k5Hw+{Ts!{k_9Xuzsq zmnrfOU|~v?=aL}R)sx-nDCha}=c{BuPT@Mkwj1bD1c8QKG=a?Peq%r=uJ_$O}mqa?BSC=zToqJZ0X2=Z`x4<`eI zwBzA=YD5ROl!!>5UF&hHXH&}m^Aa^I!77y4$UA@P@o*lni9g*s_~}Rm$*(S1&>pT7 zEf+8Okt2CkxSOJZs?Dcoc{ajM_iQL2|LBOBYYJ}FgV(g~^g~;B_wY<4leXhZPeO!N za!v4G9qEL zns}vj)Li3AxnF6bD$amesRvxn%d8LEoa8^P0K2UN7$8I1JAXJ2z>U?0p5x#^fEi`@ zw(D^<3^yyCuWo~V>5`sCUk^xHqjyYf(d2EM1CBKlR z+o9JHzGftiSP!Y}A5RVb!CpOh9O`(K1q-);^u}Ht_Yaoo&Ru3irZ?q29p&0}3fflOk&(jCyiKuwI^U z!)U>>V(P^$@NgBz5QMQSidih|1O2vKYVkUm$z2sOa!s-*?WAxA!&EE(Lg1X=@sPD< z3M6#r8IhgZ43`(bK|cp!F$icuyq_7%Ep}oz0tDHR>(hkm@0c3)yi#wRfKbOF{tVku zK%A*H>D`U}CUvyuAt%i|e(829sFiadX)XJlM{ zeg;UP(!z8e_{C&(yJ$j}dq=6LFi2c~gl(k)7Dc1K3;K(BWQCMi_v7_!PIIoOo7iTNkl zEs|~~752IA=T`^)M&6MuATQ3c!;!r{EjnA(mwD$fZimq<8cY7e)+vVB;m5_+Zre?k zq*(S3?aSOvLd}`92$N@S?}s|))(LJZfcywJj0P;S 
zAzMY}4CXrvszptUE0t|Nd~Xa^{$mOzG}6^IqaMQ4nn@Z*!@+MZu5 zGuLMl&P*3h^^L5bUxX=Yd(EHMI)!MAykGM%Xs~Pu?^S!SkZAO`A37iVv`qJM-CoJQ2N? zpq*LNgo1&}0Y1j{0{0caimEC;UKi`s(D$Yuj(tCv#7a?daoj5dPw7EZ7m!Nx>vUcU zdA$Cs1)$?ep5QbFw8dslyX_W%mRfL95^+SXlt0j>wDTQTKLC>@I^TtM53m}}rPy&j z8q=qfklCzwVDr3O6mPmvN2rCo`h7O3%web7UPyx1xby)2NTzK1A`DW`4Gwk-LFmcf zGUms`ql~!cZl}jnd&$h@tpPgSr(l1_nrN%;-Uv@aaXpWNsYpWfI)B)MX45`L z)h;#?2mp4*FiUZ_cVsYUZ>tV+d6CE;=~v;h+g zgpC4BI%I{swrA?!zcCpnU60f;tDJV)=7f#LpNNgixA6BQaK8tzCTgIM^{fybtfqjH zky`lTA!QK$SRXsS?3X0r{&^Gl;1)-j6`;j7EM zK1O9Hde7URz#{doTFw0RAkf2B@F(+dwyoDR2Et!DLK=xg7fN`ohtnBuvQQQJz4+EUU-<14Gau= z?b2miUR3WpLQ6*uX6^wx6T=qT$?jYQq6{`Bwu|!V;ja|H+`?*>RKlr z^G0GYz+)XjTdIDLVHidgnH!L+md%<%9y;a)1R?c(X^|E~3JFE%@9`lKm^i@M?%~YU zQF-XV>`VJXLvOlj9D7NbiE?k&%^C zRz<(O z_uEaH2j5sEz@nj}{&J$;o739|K&H+Dj13jv$PaGIK|YG(w`JR^$cl)MR1r9?60zF% z?h&@BXXhtF0n}`7_A`Nn*UKua9T3;FBv#r;T7kJ^L<1 z&}Ub8kc#$b@Rb{=ZmPvWphB7|0e%F4WC)nFug^vT&u}1gbY%`LKY=jZ8lo>`H_z?* zNoSV)@_JnD^Ity6%+Ig4H;Uk%Dq zTD3;*Y3H+P$z%FY{dAwMA7p zB3|++Mu;-~I{m@X2Y@Z(Yva%$J@n6|d!NHrqzUANY5|sO>Fg&WqjF3SJgvp|WIOe4 zH~Bk<0 z@1Q@`xR?j}gXo7G?A3CpiJ}H_B1r(-TI`s8W(K#k5B{hPPz$SKE9{Rpl`mPe!|vc> zV0^G~PR5br+XktI0-{w)9U(d^TWt<`Rce(Y8V%h%yB=dl+R4Z|@ULIP?-xd@5XL8DW=fHeKU#1&yJ$T?qHAmo2-0oF#af%1P{xOZv z3s#+C!3WtHbj@WtMmr+?zV^n`x}8c)=+{$hkIxy)6Mu`Zy!9ph#k(2qUcPL}lB38( zP_K8VE{fq<&n2BSH7!HKWnN#qwAo<0Z2ex>v&p?*f%eNG5=wi!ui@Ud95k$c6 zjF6Hck@Yc#P&xCAvn*?ZNYo7P=LguFoh>aw-Z(&UHxmz`8Jbf_E@|(`Y_Ww#G}x>H zKsXF;SI$6?4$bZQe1^e(XZ9_3a+^Lg%B;xR7@*S50z39k5fKhEI&~-WzP=LM1ruLB z>g1YCl!358Xbx2FVA_dqzxptKtCPtS1HUx(wL2OrRNRoZPB93h7?~9=(jL z`btVzKm^x~cam68+!^>^#5{+z{~-QQv)|2L2v*HYGOWgS{hm4p#blS)* zcdcGa$`_1;Y~>QGVNkr4`iL7_tD?GgLKbndyp_x_Fbid^61&sU80Q&si2o~`Z&PGitO@=OJiEB?##G zI4phv-EF=0=kn|4V9;hNckt2ABlHo5R(i#fpg^QzCNF#`aYJ$W5jfUp@sD5=JzD3& zPT+Q70ThIzHfNMPdwbt4<=F%M?Ki_qHBe%6W6_T8?%y>I`{D$|E9zI|0tXYZ*ikwkQ&gNbmcvvQE_Y1=+`2+J+e2X znlts3K@Bxs&-s=aT(= zEva+)9Hk_BjAD^k1ZkW(DwP#E=CJWliuLHYJojAN#Dn>wQ4GJcK+K#kcDvK&tuf6Qkabu9^J-J$<1wfVsy#5d(|^yt5CWR$yCBuR7OVMacqtRC zDq!}zT?qGgMt1h+(A2!#+}FlMr^%sLZ{*3z13CWE6e5TiqlQkg%GLvU}_vH$}ZBjY#u^p!zwe7fs>$Xr7)BX zysR$y*=|C?aJ|B$i(aB808RBaPwlKkKtH5oHhCN#8}cW@(+Ki3*=*My$ESWPZe@{| z4_K>nS?wi?l&zBop6nCRq1I(`)u7F?05ZijS7#gHIQ_&6Fv(PVkUD4?!=Dgufc}8K z&~%C|S*Lxa8>akcKqxfD>T+hy0c4>tB)EaF6zWOA312)qyGIhU zG-xU$rUkiueP~|*YlFwLyYFDZ!~8j1Jt6D8^WriRRvk2%5*GRJWT$nTr#37>wfxN& z(Cwts_gJW|=yXSZ*q?Y36slI55TT#lLp|1qxWt( zU$#6Z+P&(pUBw~v`yOpz@r7K&5ApHK74stoULHC=4cjq2U}qKP9X~ulD@hbA1b90P zt_Dti`2EkSORL2cx5Zn+@3B%+F>euDntmWIgZ90FPd;zfwoXtg>CenLtz_L{{qr-k zuzBRFmb0CqZj!s9EJs-i0O1#z0OMR0dLUZ6CNC_jroKo?n~%Z+-sxoR8RSZHVHu)J|{5QAdf>6ywIoD7L?sS1}JLr zc#bhhCot0TFrBKJ0~iT^;2#w=wp!;4MP~SsIh?IXFRLD1^S&vK! 
zJ3FK)tNAZSyg#bR;X+O>zi0?|Q2we~q#^w{FIZcrM#!cN?;RKc6>3>kz{>Q-5F!^K z;UaG3y2EOKVaW@YWD=~}quPjI6O^Sp7H2n&2<$F|$39Sr(Gk(Us(;>MDH^1zlqmXH zQDMk%X=CF>xA(NhmI40kXD?)KGEFGf01lVN1#>&b%6F^azdIV>csw9iAKm4Ug zJnif4U1{!lzpPb18+eL61weHs$YODW9Jyyua1R*L-sj+;xmM0~I^Ghcu>o}XxBJ49 zlBU)P$y+;zhw^k8!y6&9K0e=+IF3Q&X)D^7+{i3Mm|kIp9vAFvIR= zYxq+VH#$|=(P(~kIA%@Ii4^Ja1I?e@WmCTf+W(C!j=PTm>ioCKI*Z;umBo%i0=Kwvx z0V=q+6Wtu(G@`)6^3P2b)8$E3WujK>&qtcOL~9~tjV~VB8Rg{OeaaGCFV#G+PS`lZ zj!?@~E@+{#iOvpBbQMk;qwD|LTzmt06%Y4`_l*CBN3wEr^n`XADt&yffSzLs$#^Vk z!nO4s6Ozv*GJVkt+S^#xy#jv55+pA$3!9a7L4lu-+nqZ2=24_%J`{=~3D7NJ*`nwjF{Nk=MEA5MQc^NN$=NtRsF)dZ0>!)N1Bd++P(i_1A2`Lek0 zbH(SsRd@p#;$Rpi@$r<=JH=QV+so#SX~8DWnFjWfeIKYE?iUCM@!$2W5_i}oTfNM$6 z zOZJ2)8yf@tD%3AuzF=+4zdQ#JvYnCoEBOd#K^kUY({QmH=*Gl00}8FsdpZUN8GWw9 zlMW|4S|N>g>w|5&3E|cSB`=2YzHfuGlyCbWR&6XAWrL#8F(au%3xQ-6s!vM(t0G=O zq14z<*#F{VsBiN6Q3TiL)^m)wkrC|oIiqRyL?TwKqIoi7KK{4){}AC_&Z^@|EcHz_ zmJI*va}q<^`60w`!{~c>v5=MeWJ;4S#bXm4`~(eqEYuE}<%Wp?OwObS@l@Ek-k2nx z{HhVL)M@P4Xgn(3cvKwj7w$#boW2 zDI;tbEL?^ggSgiZuy=~+B;8zMBDm+&DhQ;WL^q``>FnB;_g3FMIdSZRJCcge4U(EA zL~APOt#C!NB{%9e*T+RSC8cDDLVT49%a^MtyM3pU&l+1gq>f`*5={^VzCO@wPgR#& zM3<*5Z$4~O9(7B)^YrO(4{dn?++$bP9O9bNdVF#_*75% zt~4mOOo)sLZtvP`Km2h1`@nU>g{9gQU89&cCe{n2aEz+n==7C~sM+}gc;UZA$5c3Z zA4|Q^7UYa!k&%pL=}SR7|Lu=!0#N|67}TW3LyPBr2oGt;I#Hk)-B6V+H~+Dd15rV#R-N)1bIcsf|>f}s}{vKIUOI3A8TZnUkrpP z#ahdKMJW14J*Ult%J!}C^RMROi_|A`z*x&2U}u%i?f5;>tZbLBImbYwRb{>Q3EcN< zGm?bYzC1P(_-~*c3T{SD&l31792U3{;@Lgw^C?i-zEF8=BXq?L&Un6;=w51Ex&5(7 zHGLZL)T_m9e!d=2(O{Qx`>co+)s@LQG4YT*s9oT4y!@wHW!3Mob%;){%6}AaNF9G2`KXtDtZ$;oU^g*= zaD*hZ9VsBsSfzjP0{*0=pOZq;fE_N8w=R;Y=Ce;yKg4)6Cbh}eGCh8>P#>z0(RF&6 zrUOwqb<$DFr{vgtVyhKNo$8^wtIPA4O`W!9acnk4i{=tIM!?NXV!fyPHzLemI$E~oYDN`^6n^Aj zFxdt*zg(t4C1QKnIGDaN0P#4O)ckzMv&WAc-(R08UI53mS4PHE{4T(aY8Dvtda#)L zkfdEa9iQjaQTVla^V!1AfA#=gyb^+Qho!iBeop){I}y9$%`rpQT`SWq6$iYVj0xI@ zcR45A7|w5RQb8{q+!Yru)}*W7BW&{8odN|$$6`wj@vgDY<7d$re$zGlzU}*kTu>); zQ{ucQ+nHg0!%e;_ZL3FfQ$(M{-i*9J&Z)!;kTT9s60mZ4RnY(y?GloRJH+R0{%CqS zj0S;YVy@JxO$v*dbk8%Hg|5xvP|Oi$KTi(_j+G05@P#>MbSbg4w4_vGfYMa^b;+x1 zMO|6l_c~9c`BMUt>gINORhb5&Q-lfEH^fG^+3xHrlHQ(`=Vwzjr50O-a}Gv<3uU=N)EFudda1P+TRo$;Kw6q2zo zodN)TLko;mAd6<^z(yX~Q=W>K{+giRUp=)u0eq*0Jk#P_&X&)7L!7LVsZv@aN7_ml zn|Pjh=xtWDgr`&}!4nI(JwNkj9u+~7D%i`CSR?0eX}U*Q|Yf{->NmAFmPU)HcDR9c{t25Fm{umoT?vcGlo= zPgq3cC)ne6|IU;>3)|a2JZ#1_t4S1P9~oi3thO*UrQ^ChkVPjBht)@HLg3aY1_F4< ziiGywvfe+1ZUM_(D|PgzXza<=Ns){Oj)I;>spzj&rk`HKuA1uyAclZ0g{K3dVi9Fd)Y&@q(HbVwGI{P)|N56#P&Br^3u~zpzup#x%K%bj5t*9h z`wHartrc@d7KBvq@jT`hT_?CW9r-24dJ||yneUENJTjeGaUX-d6z&};UI;$TOPr?f zKx_MXaFR1it;7p6)wzUryzP^m@I}xCnKS2QAE)Ci_HGinrZL)c9p6xv^ltd3p`GBm zLP(a;dVz$M%JY`yu4wZ@H7ouS+wA^jwS?HzUmXMZ_o{u&8F2Vt>sQ{Pp!Wn_`FkLZ z2*E@#vZ@Yhu!@KS>siH*EH8oKt|EzPThQ4%t>5 zma=Ygs1|~~qkW<$`IFF!kG-EMAw45seD&J}w>u_%iq=u5Mr4##1Jo%-GK1PkRGY6M zYIr`T(w9V%-;KK^8dR-V@usO&1pO6o9Q6$ehRO7`2kMN4mEnw-qhyzZcLq(VHfJiI zzBLH&ZFGE_DDQ5i;T6YXK@3o&-UI1Us<_*NEj=9_jbH-^6e-Eotn0H)Lmy}6U)nt! 
z-Ykk6=y=ctJ*ey{o2{%yE3JQMr^tz}#{6fh6D!n!r^u>`=qBLav^)wwIW>iXg=J7I z$Wo*v-A<9OQu+vn@2}<2^XhHjj@A{e+A;iv->^VLRRg3G11p2y9;2mgCy5_ z4)u6Q&;ahOkCzvMtf%E|Z+J7ApoHrJQTT_|8#_T}q2V)D>%DJw&;JxH(R{#DNH(6#X0&6;XpBykWALhz^_Uh`P*fDHhai?_Q z9f3wnHJa2UqdOVF(|0GrH_02+)}0n0-6@tCls&Hmu>Wor{cMF?E>v9&>>M;9?IftywH);IXbG(BA{-SiGY$D{xqE$ELz4xydL*tNGIoT`uFXo(nP6I z>fBQRly29sg%#mhEM~Rn%<6k&uvL=%(;i z-dSBeI8bQSz0oLkwyL-J z)OG=PF_*D6c4Xwe8$Ldpbpybse*(5O*`1BN|7;KN5_rVa(4S^AS5V_l}@WMZo;a77;X^c6%Zfgh(csLgoEn!b;xz-Dc$Nfwr%@V?W z?QheUw|LcEa<@LBhp%X>uiHT;r#O1ahY12`^@-|puTQ1l5-G( ziZ^%9128pfmn$Mu!g9K(i|Xv(Wq127Ca24FY6O2GfbzX8vh!1*V}niTE6r0LPJvo= z0bovBTMHz{=8B3Ky)<`02R))wY0ED3!If(I%(i@L>A0=va&?O6$<#dmCLJ^R-%`~P z^<|4gx5AP>mBQeDoMv35vCFfrQ#xco;)-i#)*5^O6^mlIQwKb=Bv{(*70t}dyauYh zP{?37Rsy}4^mG%qb}&d$%=4Ugrj!f@@HlIzlhhzJ zDOJt7Cz97-NzB5(xK41%(_I7FC^yNR$^0yyvdK}RBr-JCI|ptck2!p{=-Qr$l|YU!1}K3;oW+yS<&>|deJDA8_LHReii%LsPHWC~0z=sj9LtA*zQt2-#h z9uCfUU;S4sWBc;OQ>A@t#AIN`@el-E!Q!;)qK;;gu*-5T=der zraV&Cl-w0rssN*nhedW^(kgRo6CsD8)i_5e-?|8GtRf*Y%|Lx=i?Dqc$?nQb={O{5 z;LFtbHy-WrlX&3{mfT9(50H`O0}hw=$LWSvqF#^H(<2o`ODKN{3RQ|R(9k>qB==Iw z-rluP^_Tu?1|j`AK(rTg)9UCc?$HM=A;b77x_$Ex4B;lM0f_t)6D9%Tet&{tjzj}q zBET?l3A{@!$DIHQOF6a90r~el05mVg8@0u2nDy- zAKc!fJ_zq+4f{X=ji-I+y8Pd((QKBYxxS?_b)#`rGq$i*jGMb~A7mngw|(Vps5@iWvfV z1Y@OIfbJOB2=?UY#3qJR2#CTwyA4^1<26bR(d(|xjDQDFP8;1BtmqXYbol}tR$1>6 z_u_HzYI-blMZ{z#Lr=PRj=qF-pNWRW3O$W{sja!ZLqmnx8P4KZ5r zXeLCU5(MPxF}dpF1Vkfp)hbZ|!S)?8rjW&m1`1+c*qsBeUFun0_Je53c6 zj{#n~dvPY-y&7OL7=TY0{Qdi5Uy?Z`AS)ZN)eF(}Ti_Xk2{4hw0fgM9w$Z>n6Sdjr z!+^HX?dOGRt^pTrE6)^U`M|EZ71`~;kkF^L<IW zpC&dnR>;jwcgIIVKs5L)RweoCUPO0OOu2Jly?=tgh|Bi3&=0l)SH*%IgOdai+m{dW zFAwxN4h~sr3g;PeCYhzeipvDKVh7IPQ%Lt|b*i+76W%U_)T`x*zP0m3D!Xc`?*`%i z$+dcj4|m<^t1Z`$WLy`n^dKY~+kT`oBBkpl0n4RtF)=1rbb1nCRw+Z)nN!H+glBbH zhISM=ENG=tH)LU1y9!tF{Z9Z;;o;$;I9%;(TtpBGz+)I|3C0KE*|=?vCHyI(mr@@_ zZc-B5{rhS3c|dG=1Y>P3;=QW}Yif6@@osv8t7zi9V0UnZ_O#uMOBT}sSZBgnsWUc1 z_W>%suGMSuZ@MI%g}~>`Wf^Gdn=3Na*bWM1zFnY%LZ-d8DARo$U^8d;X2f3QzIkER z7q=fF35vPhG0%z@JbK9ajBkJYK=i^qMy_rN3%x0?#n&}o(r}IZ&|Jyx(F{0M_ksq> z18@AaN;>i+r-M84eB=FAmK+nPUXPwO_$RYxw^C>E$cTnps!!V0@Sq{7aF@savZOg# zD)8AEu2YFx#qwLl_m7A2y>20xK)kQF1S@esVemaPRM!RBzq7!gk9gAU`Vs*F!63!c z%9t)ct2xbO>{&5ZB|nRz)Am~fyC;83Fn4NHtT)D3FoPcJHSWuU5KG2apg{x|O~d$S zA4@1LqVZUg3w9~*D7ZL%=dXBTI)nBDCe6`g1T$&B%NayZhbNNbZI`-8UT4D3JEi35TQno zb0#G`WJADQ67mz=2+Tl0JdjRm{;pEFI*tEpR*PhA9yrUr$m5au-HVZ1Qu16ZoaQI+ z@;3Bcvx%|lnR6Z-#Gxn4RAu}iSpt1W_nV@OxtO{7&p6863I(}v9x!SQtfjfHPLlgB z!zYzhRega^Qg<-FB@Hg>Xt^07DKHqzG{RU$&;V)3IAr@L=$j*f$+idq44po-PYr=E zMtLS%uMTFS)m>jX!RDBh)YPw{%POdg{uz8#kl!_`g5Y@JOrl>_`7;<>RtQNU*-bht zW7gDCj^$a^Up|_cbk-et7kTsQS0KYu{;IB3=53?`+s#z{iElRY42eDX85vZK9y!o@ zA53PFdO5ebP%knh-R*Y_Zs*koOPD@iMiW=@L%cxhBGMLnvqFT7v*;_*$d&0RD*_DDWs??Nf6!~Jtjw{bEwhh2xBX#7sAUnSuE z^x-xZPI2&@EKfIalN=mzPqXdYGgI_pjt0DV8C6r?od(_Gt;u}qfq4>621SSoponFB;_`q74E;$C>*$0c@Hq&pAC;Ha#0Af zhMVO{JiN7Ueeuc3qkxg2F(5RlQ`>k=uJ$El0SAp3-IOviId>uV_uo!N2R)c-=>MdO zM-WlKp25b$3*OnW$@&R49kKeH3`#$lVgP%K$IAiDE%8z(E>qBD!_ZZKIll(*r|=8EUq8JCiXme_KUudIS|6)%Tq(7g zDtE5ZT;Ke~N>eC<%1Z3~&XJ<<&tye0H6-uyfgte96pkakC>0xS;nn1u@b!-^?hIU zcVR}B$Io_uSWXIZyo~i4p<);|^B{v4OX^#S52`pz!(W|Fm>sw(FB7Iio4NlzLpy}C z=?Kw#^)!y@y5UJo-fIX4;Wt+JBa@)Yq#9cQiHYmpw;gUDa|FtrL>^mWp(m;PCN5G< zhi|Q`4DGB2cUqmexVSWbzAF7BRFnQE!`X<5rYr(BF8MWBrNt%ilfb|y5VqXQa;BuQVoTKgRxBO)T&@icTZLPFe`V-{6YOzn9&jc}Ez)b)t=G<1X- zXI{Ob)0mUU?H5Bf6@_m>O+B^6k@(Nm<4Xug$i=*D$55r3QHnQb?&^5gKQLJmA5~n) zC{kU|HpHASU-o0t3Xjvs#zC649Yd`cl1hV9QD1#d)6Dj;wKwkk%3~R14ko4o?xp1$ zQreeM5FM^1D8IpLog{rpSF-WFUS%rr4uAm1uPhtx4lDHyjO->^7k{bWZ-R`BJeS9` 
z9rplx*fkryxQpwym_A<$%00xs#(QbjOjgu0_i33WKcqf4tIX(6^eGlw1Zl2Ap1tw) zwJ00DMUu)CXIDUz;(;ir3UOqp?dfbnfgJc`SIBqFMq?kZ1aC` zb_Z5?FA&_j1%m3Kg{yDm1*llFWN?Sfh~Jx!c3DfgwBX%MJw8s+jn+DFCO#$QA2aO) zOh3&^8ogjJvt*;dDjC!>4c#2-QcR0LhA}|{Qn5;}@k|A^2uJ@Cn00Va&>~vR<{lR0 z6_iL?*_E%=)yVR*r!jo(ieIeW9%2xH52H|R8hQPsX#<;E6x1y3^wdzbS^N6q%DXr* zPHhvO+?FZ=zir-k9Y z>Y0d9V2AJH=XdfEPMc_%ct>I2G0OmbH2AFzFSF-)J^;e>N75(3Q}S;e`94olkG|6w zU#`cHJ_N&1LctnVzua0Axg6GCO^pK&3`?FNTJ`$frtd@rgw#K*lm4OMxYBZ^qN@^W zN~b&f+V{hZ@^iIp@4>Ok_{r2Jv0#OH`bSc=Y;rx8J!2CPcbiSW=PW{*@9q)CVr4<4 zm@yh@xCyqoyE+uLdn_4OY->ii!%o^!sKG$3R}`NlT~F1sBaGAAYc9@-VrGz@ITHf30e%EhKq|X|Ry`L1b7M z$5eGK8E`Z)J5mN;3$-!w->-!UzRE$3>y8V)yCesm$7wN@$7aFqR<&2#<%v&&m9h|d zQznIk&?bvwKh?ZmN<{n&;FZfd1>KYnbX8gleDGMDM$ny(enX=UHvmHeZKMD(Avtrs z{dYlBESC{)fFkA6q#dlwCFHhs=j|vjBizdH_0K%}yiqbm{g?FW6zA%}q0IFXCn~GZ z92*zi(8IUNbr1VclNCh$Vr0I}V$f|>ysZTgDfmtlY-W=qN6p3*$xA*Ve;envWQ(o^ z;NaZXYSkJhyeADBT(9o>CJCf$`FG%HEN%>E<&$cCMzUNIqoboc2WY0l@c?hN1ML;x zY^*)>F<1ZR1AE>M~`09}DO#s!ek- zuL8*g%MGAUS`b1KA=fTLpYxykFwS|YP=QGAewAv;#+#W6hmUTw$6Pmut30M@AP|?! zacT4?G|y)iZ-!c`4AT87vH0%7xm#!=O#IE}QF5XP;$*Gfi%EXXZK}BQZOPR)0axs) zVJdp`)a@dz^nGv3#{h0+IDhJX(7!Br=zrUYPf>Df!wa$Qjk0+Vd!9#e>KiCD1gBko$>T(dKu&AhLfGj!~7`B4hE382)w>Kfn zrriNzI$nhoJ{A6M?(yiER7gO|889qCENQ+G$hP@RmlW{>h6fa8wiCGkGNt*CO7(}> zn;%vRZvZ>n7&t}a%RaSpfJIQM;vb63LG5he2|o&>q-~NJk{jPk`(T$_S;w7G~8d zM*wJIm{Th*_E<+!4Qo07!$2@q2*pyG7m<$QT8Nbt7Ewy#l8rQrQqAz$yFF{ZwRO%D z78b^OsH$32S4Rx6j!rw?eWgKlsUFnasVyxzLO41=)eBs)$&J zZ+0X41xokn7qd$TyEGLqvPk_OrA131O8Xm^*Z;6?g@9KiWWKOhYW;oocGFuVE=%f; z2Y3*7--tW=l%S<~HO~>YL6#h9=)Zn>^ht0L|JkInMo>}%7uSpURvHHQw9MUpa+P+g zM#0yg68#k6@24=dUw&7h%`v5_ZxK4on2O^baa=frm+>ieVXx~%EOQ(m9kT6*amZcSG!nWOhETZ)%xh-wxFgaRmk&s@Vktk;FZP1#u~Zo zoS%C%{213c2S!#Arz_98=np`DlmP_aHET)M4qasK3|&vz*++SsWIikbZW*D0|4;64 z$bkE9!r8lb8C4-L-4D=A51BF@ojC&q4-@Y>@ZY#Y1MZ7wgbVN$>jH+B9AH7Y&3waD zp$YLet+?og`^8TY?p&2wwYSW>yBXgXTWBnFHD!Kn zGW{ro0aZ`)eMqpNNDBbC^NLD_(NyEsV;4lB+Laxzh25dvFK+26hVFvnPkIKM_ z{sW8tOBY7O32b<)pjcCb%9!Nwn=rgZ!yxE>Xf{TdFI4d^^gTYsh*lWBWX&WUGjlf+Lc4kW7KbW7BpvZGb$PfB4_B$`WiGYy~zVO&aU<(@L>x|8^IM3)QUBUfS5*f zv6et?ePb2ybW@T^Sig#bEKBEQYX_hcM?|C48BSW4G~&ZfGp-k^_echfr(4n+8&TKq zVwIJ!<21xA>gD}d^|ZfU0J{#xl06^(Iom~axC=+$VW^~>glTbB@)uSWQd|reO`$d+ zRoHjBv}dOaRIFHHVg!ErkG=1>Rgl0_>w4c@tW)u>>lUi1s~ZC+KBm^Tw)hou8=FF$ zQb}p)f-_}x_4hb6z3XO-Vog9^2f;9NNk;hw)8l6Op3E{X>tk=4nVTo2s85CwDv7tJ zg3Y!q6d#9HKxw1x;Ks*E7AiKZUjEEt;f*HCDH^KDXP8vHCCFx#(sQggh zV+qk^dwdt#_2j6V23BKyb5Dl2oexMEQ9>=kUW2~Zeu3O+FF0DL_X?Vw4S;AwCa}Ku z-Ha))P}YL)aL)AxMF|*H$h15kJ^2j9$<4rrPFV=J`!(k2g zIP`cX3+d!OGpyB7UIsj(3|mtysnaO+<&OJFp(@&jhLjSe=iuRf5X|nl3Y&?k!FB~L z__E11cgDL_&AQD*{ZTDbatmfUV{8V}*S}HD9MlTCIpV$q_AbD-ph{Ot8kH~{OB5ZL z^Z<0RHZ={sF;ncfWgN9bZU`Wgr-FDd zoLNQrd2sL6^ygv-jG%mWC?ONW-l}~8<2#QUI6ojS@oBs(nWMH&{rTc}RYxhqVBJA? 
zntgDsmg{|hF0;#nOsEmY4-8k$gv%!#;YgG|v~bKYY%Sb@p9_}gCP@NE`wS$^wzNH} z$xzbI;SR=IA2(!;`hO^`J~oW>5t@4v|46w#P!;J(!02l3U=5qOUFUp%5kmoIbNl<8 zqk{aRmg2Z=weCc0F-MtDl5m6p*D{@NcNF_KtUrCy^-J06fAE4_*Hh&_ok(+JfZs&h zz~~k9D!H!A_XV?zLSwn$$gy0RUG%%~!S*Eu-+7aVT!G&;a0>2!I{HdeS zAtP(a$Z7$h7txmCx2z4NUzU}&@#QXq*4LA4ZQJVfLK%0E+#)UI+)wHL$VL(oauljvDV4x^fDupYZ7D zCR8AVyeij~M{nZ*p#qK3=14etOIlGnql~jGgiwSitPY0 zzQfBN-c95lQrUnD9 zhY=neJnfEi8!pJ6v8a%V>vd|KA)9>WLGcf#XXJd}Po=K7+@*DsMQ6P{HSneo&GKKg z3lzP8>B|Sm%4NIEfPiI|`x6tj`1PZ54N`5y+{~K14A~XUeCL-h8Na_;1W#%c*hfQN@a0F5nU*#sy!Lc+tt(FC;& zAwFxKmz%kV{f~!Q+S;B*YURDq$bZHa( zHE2l>>*MJN9nk-F!IJQ&8~K)en1g%ZF+QZYmCmSuBBis;+%{vlofc*{Af?~bQ1~}A zm86=v(UWm_IP|aOc?#0on(x*jknS_(Z>KRAr@&&4wnRqeM;vagD>lNcCMZgLha9TW zQ%K5_woI5@scd*=w&e9D{Jk(ME4xasJc`z4b&oK)IeiNc>1S07L7v<-H?}SlH1gB zeE8V~5*5`6DlRHxnIL7AH3mkG8&7>Xwa@(cfiT$|@ZFaHER3S08a|i3`9>X7!y